diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index f90dd1e620..cd1f448326 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -63,7 +63,6 @@ aliases:
eventing-natss-approvers:
- astelmashenko
- dan-j
- - zhaojizhuang
eventing-prometheus-approvers:
- lberk
- matzew
@@ -118,33 +117,29 @@ aliases:
- salaboy
homebrew-kn-plugins-approvers:
- dsimansk
- - maximilien
- rhuss
kn-plugin-admin-approvers:
- - maximilien
+ - dsimansk
- rhuss
- - zhanggbj
kn-plugin-event-approvers:
- cardil
- rhuss
kn-plugin-operator-approvers:
- dsimansk
- houshengbo
- - maximilien
- rhuss
kn-plugin-quickstart-approvers:
- dsimansk
- psschwei
- rhuss
kn-plugin-sample-approvers:
- - maximilien
+ - dsimansk
- rhuss
kn-plugin-service-log-approvers:
- rhuss
kn-plugin-source-kafka-approvers:
- daisy-ycguo
- dsimansk
- - maximilien
- rhuss
kn-plugin-source-kamelet-approvers:
- christophd
@@ -199,7 +194,6 @@ aliases:
- upodroid
security-guard-approvers:
- davidhadas
- - maximilien
- psschwei
- rhuss
security-wg-leads:
diff --git a/control-plane/cmd/kafka-controller/main.go b/control-plane/cmd/kafka-controller/main.go
index add3c600eb..fdcbc43dea 100644
--- a/control-plane/cmd/kafka-controller/main.go
+++ b/control-plane/cmd/kafka-controller/main.go
@@ -19,7 +19,10 @@ package main
import (
"context"
"log"
+ "os"
+ "strings"
+ "github.com/IBM/sarama"
filteredFactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
@@ -70,7 +73,16 @@ func main() {
auth.OIDCLabelSelector,
kafkainternals.DispatcherLabelSelectorStr,
)
- ctx = clientpool.WithKafkaClientPool(ctx)
+
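+ // Sarama logging and the shared Kafka client pool are toggled via environment variables
+ // (see ENABLE_SARAMA_LOGGER, ENABLE_SARAMA_DEBUG_LOGGER and ENABLE_SARAMA_CLIENT_POOL in
+ // 500-controller.yaml); the client pool stays enabled when ENABLE_SARAMA_CLIENT_POOL is unset.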
+ if v := os.Getenv("ENABLE_SARAMA_LOGGER"); strings.EqualFold(v, "true") {
+ sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags|log.Llongfile)
+ }
+ if v := os.Getenv("ENABLE_SARAMA_DEBUG_LOGGER"); strings.EqualFold(v, "true") {
+ sarama.DebugLogger = log.New(os.Stdout, "[sarama][debug] ", log.LstdFlags|log.Llongfile)
+ }
+ if v := os.Getenv("ENABLE_SARAMA_CLIENT_POOL"); v == "" || strings.EqualFold(v, "true") {
+ ctx = clientpool.WithKafkaClientPool(ctx)
+ }
sharedmain.MainNamed(ctx, component,
diff --git a/control-plane/config/eventing-kafka-broker/200-controller/500-controller.yaml b/control-plane/config/eventing-kafka-broker/200-controller/500-controller.yaml
index 13a7df4c1c..58133f2dc2 100644
--- a/control-plane/config/eventing-kafka-broker/200-controller/500-controller.yaml
+++ b/control-plane/config/eventing-kafka-broker/200-controller/500-controller.yaml
@@ -173,6 +173,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
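+ # Optional toggles for sarama logging; set ENABLE_SARAMA_CLIENT_POOL to "false" to bypass the shared Kafka client pool.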
+ - name: ENABLE_SARAMA_LOGGER
+ value: "false"
+ - name: ENABLE_SARAMA_DEBUG_LOGGER
+ value: "false"
+ - name: ENABLE_SARAMA_CLIENT_POOL
+ value: "true"
ports:
- containerPort: 9090
diff --git a/control-plane/config/post-install/500-post-install-job.yaml b/control-plane/config/post-install/500-post-install-job.yaml
index 393315376e..cf3a7b06ee 100644
--- a/control-plane/config/post-install/500-post-install-job.yaml
+++ b/control-plane/config/post-install/500-post-install-job.yaml
@@ -28,6 +28,7 @@ spec:
labels:
app: kafka-controller-post-install
app.kubernetes.io/version: devel
+ sidecar.istio.io/inject: "false"
annotations:
sidecar.istio.io/inject: "false"
spec:
diff --git a/control-plane/config/post-install/500-storage-version-migrator.yaml b/control-plane/config/post-install/500-storage-version-migrator.yaml
index eafc8a732b..eb3c5abb38 100644
--- a/control-plane/config/post-install/500-storage-version-migrator.yaml
+++ b/control-plane/config/post-install/500-storage-version-migrator.yaml
@@ -28,6 +28,7 @@ spec:
labels:
app: "knative-kafka-storage-version-migrator"
app.kubernetes.io/version: devel
+ sidecar.istio.io/inject: "false"
annotations:
sidecar.istio.io/inject: "false"
spec:
diff --git a/control-plane/pkg/apis/eventing/v1alpha1/implements_test.go b/control-plane/pkg/apis/eventing/v1alpha1/implements_test.go
new file mode 100644
index 0000000000..2cb5c8b55b
--- /dev/null
+++ b/control-plane/pkg/apis/eventing/v1alpha1/implements_test.go
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 The Knative Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "knative.dev/pkg/apis/duck"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+func TestTypesImplements(t *testing.T) {
+ testCases := []struct {
+ instance interface{}
+ iface duck.Implementable
+ }{
+ {instance: &KafkaSink{}, iface: &duckv1.Conditions{}},
+ {instance: &KafkaSink{}, iface: &duckv1.Addressable{}},
+ }
+ for _, tc := range testCases {
+ if err := duck.VerifyType(tc.instance, tc.iface); err != nil {
+ t.Error(err)
+ }
+ }
+}
diff --git a/control-plane/pkg/apis/eventing/v1alpha1/kafka_sink_lifecycle.go b/control-plane/pkg/apis/eventing/v1alpha1/kafka_sink_lifecycle.go
index 299f105a41..f061ad35e8 100644
--- a/control-plane/pkg/apis/eventing/v1alpha1/kafka_sink_lifecycle.go
+++ b/control-plane/pkg/apis/eventing/v1alpha1/kafka_sink_lifecycle.go
@@ -22,7 +22,8 @@ import (
)
const (
- ConditionAddressable apis.ConditionType = "Addressable"
+ ConditionAddressable apis.ConditionType = "Addressable"
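+ // ConditionEventPoliciesReady has status True when all EventPolicies applying to the KafkaSink are ready.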
+ ConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
)
var conditionSet apis.ConditionSet
@@ -54,3 +55,19 @@ func (ks *KafkaSinkStatus) SetAddress(addr *duckv1.Addressable) {
func (kss *KafkaSinkStatus) InitializeConditions() {
kss.GetConditionSet().Manage(kss).InitializeConditions()
}
+
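+// MarkEventPoliciesTrue sets the EventPoliciesReady condition to True.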
+func (kss *KafkaSinkStatus) MarkEventPoliciesTrue() {
+ kss.GetConditionSet().Manage(kss).MarkTrue(ConditionEventPoliciesReady)
+}
+
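+// MarkEventPoliciesTrueWithReason sets the EventPoliciesReady condition to True with the given reason and message.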
+func (kss *KafkaSinkStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ kss.GetConditionSet().Manage(kss).MarkTrueWithReason(ConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
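+// MarkEventPoliciesFailed sets the EventPoliciesReady condition to False with the given reason and message.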
+func (kss *KafkaSinkStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ kss.GetConditionSet().Manage(kss).MarkFalse(ConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
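+// MarkEventPoliciesUnknown sets the EventPoliciesReady condition to Unknown with the given reason and message.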
+func (kss *KafkaSinkStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ kss.GetConditionSet().Manage(kss).MarkUnknown(ConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
diff --git a/control-plane/pkg/apis/internalskafkaeventing/v1alpha1/consumer_group_types.go b/control-plane/pkg/apis/internalskafkaeventing/v1alpha1/consumer_group_types.go
index aae63d4110..2e0632e3d0 100644
--- a/control-plane/pkg/apis/internalskafkaeventing/v1alpha1/consumer_group_types.go
+++ b/control-plane/pkg/apis/internalskafkaeventing/v1alpha1/consumer_group_types.go
@@ -19,6 +19,7 @@ package v1alpha1
import (
"strings"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
@@ -108,6 +109,10 @@ type ConsumerGroupSpec struct {
// OIDCServiceAccountName is the name of service account used for this components
// OIDC authentication.
OIDCServiceAccountName *string `json:"oidcServiceAccountName,omitempty"`
+
+ // TopLevelResourceRef is a reference to a top level resource.
+ // For a ConsumerGroup associated with a Trigger, a Broker reference will be set.
+ TopLevelResourceRef *corev1.ObjectReference `json:"topLevelResourceRef,omitempty"`
}
type ConsumerGroupStatus struct {
@@ -210,6 +215,13 @@ func (cg *ConsumerGroup) GetUserFacingResourceRef() *metav1.OwnerReference {
return nil
}
+// GetTopLevelUserFacingResourceRef gets the top-level resource reference to the user-facing resource
+// backed by this ConsumerGroup, as set in Spec.TopLevelResourceRef.
+// For example, for a Trigger, it will return a Broker reference.
+func (cg *ConsumerGroup) GetTopLevelUserFacingResourceRef() *corev1.ObjectReference {
+ return cg.Spec.TopLevelResourceRef
+}
+
func (cg *ConsumerGroup) IsNotScheduled() bool {
// We want to return true when:
// - the condition isn't present, or
diff --git a/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_lifecycle.go b/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_lifecycle.go
index daa3d03e14..43d3124438 100644
--- a/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_lifecycle.go
+++ b/control-plane/pkg/apis/messaging/v1beta1/kafka_channel_lifecycle.go
@@ -51,6 +51,8 @@ const (
// KafkaChannelConditionChannelServiceReady has status True when the K8S Service representing the channel
// is ready. Because this uses ExternalName, there are no endpoints to check.
KafkaChannelConditionChannelServiceReady apis.ConditionType = "ChannelServiceReady"
+
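+ // ConditionEventPoliciesReady has status True when all EventPolicies applying to the KafkaChannel are ready.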
+ ConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
)
// RegisterAlternateKafkaChannelConditionSet register a different apis.ConditionSet.
@@ -129,3 +131,19 @@ func (kcs *KafkaChannelStatus) MarkChannelServiceFailed(reason, messageFormat st
func (kcs *KafkaChannelStatus) MarkChannelServiceTrue() {
kcs.GetConditionSet().Manage(kcs).MarkTrue(KafkaChannelConditionChannelServiceReady)
}
+
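+// MarkEventPoliciesTrue sets the EventPoliciesReady condition to True.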
+func (kcs *KafkaChannelStatus) MarkEventPoliciesTrue() {
+ kcs.GetConditionSet().Manage(kcs).MarkTrue(ConditionEventPoliciesReady)
+}
+
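+// MarkEventPoliciesTrueWithReason sets the EventPoliciesReady condition to True with the given reason and message.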
+func (kcs *KafkaChannelStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ kcs.GetConditionSet().Manage(kcs).MarkTrueWithReason(ConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
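+// MarkEventPoliciesFailed sets the EventPoliciesReady condition to False with the given reason and message.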
+func (kcs *KafkaChannelStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ kcs.GetConditionSet().Manage(kcs).MarkFalse(ConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
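+// MarkEventPoliciesUnknown sets the EventPoliciesReady condition to Unknown with the given reason and message.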
+func (kcs *KafkaChannelStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ kcs.GetConditionSet().Manage(kcs).MarkUnknown(ConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
diff --git a/control-plane/pkg/apis/sources/v1beta1/implements_test.go b/control-plane/pkg/apis/sources/v1beta1/implements_test.go
new file mode 100644
index 0000000000..13117a4cfe
--- /dev/null
+++ b/control-plane/pkg/apis/sources/v1beta1/implements_test.go
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 The Knative Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1beta1
+
+import (
+ "testing"
+
+ "knative.dev/pkg/apis/duck"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+func TestTypesImplements(t *testing.T) {
+ testCases := []struct {
+ instance interface{}
+ iface duck.Implementable
+ }{
+ {instance: &KafkaSource{}, iface: &duckv1.Conditions{}},
+ {instance: &KafkaSource{}, iface: &duckv1.Source{}},
+ }
+ for _, tc := range testCases {
+ if err := duck.VerifyType(tc.instance, tc.iface); err != nil {
+ t.Error(err)
+ }
+ }
+}
diff --git a/control-plane/pkg/core/config/utils.go b/control-plane/pkg/core/config/utils.go
index 580e4067c8..872896a8c3 100644
--- a/control-plane/pkg/core/config/utils.go
+++ b/control-plane/pkg/core/config/utils.go
@@ -24,13 +24,13 @@ import (
"sort"
"strings"
- "knative.dev/eventing/pkg/apis/feature"
- "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
"github.com/rickb777/date/period"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
duck "knative.dev/eventing/pkg/apis/duck/v1"
+ "knative.dev/eventing/pkg/apis/feature"
"knative.dev/eventing/pkg/eventingtls"
"knative.dev/pkg/resolver"
@@ -55,14 +55,18 @@ func ContentModeFromString(mode string) contract.ContentMode {
}
}
-// EventPoliciesFromAppliedEventPoliciesStatus resolves a AppliedEventPoliciesStatus into a list of contract.EventPolicy
-func EventPoliciesFromAppliedEventPoliciesStatus(status duck.AppliedEventPoliciesStatus, lister v1alpha1.EventPolicyLister, namespace string, features feature.Flags) ([]*contract.EventPolicy, error) {
- eventPolicies := make([]*contract.EventPolicy, 0, len(status.Policies))
+// ContractEventPoliciesFromEventPolicies resolves a list of v1alpha1.EventPolicy into a list of contract.EventPolicy
+func ContractEventPoliciesFromEventPolicies(applyingEventPolicies []*eventingv1alpha1.EventPolicy, namespace string, features feature.Flags) []*contract.EventPolicy {
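+ // EventPolicies are only enforced for OIDC identities, so no contract policies are produced when OIDC is disabled.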
+ if !features.IsOIDCAuthentication() {
+ return nil
+ }
- for _, appliedPolicy := range status.Policies {
- policy, err := lister.EventPolicies(namespace).Get(appliedPolicy.Name)
- if err != nil {
- return nil, fmt.Errorf("failed to get eventPolicy %s: %w", appliedPolicy.Name, err)
+ eventPolicies := make([]*contract.EventPolicy, 0, len(applyingEventPolicies))
+
+ for _, policy := range applyingEventPolicies {
+ if !policy.Status.IsReady() {
+ // Only add ready EventPolicies to the contract.
+ continue
}
contractPolicy := &contract.EventPolicy{}
@@ -132,7 +136,7 @@ func EventPoliciesFromAppliedEventPoliciesStatus(status duck.AppliedEventPolicie
// else: deny all -> add no additional policy
}
- return eventPolicies, nil
+ return eventPolicies
}
func EgressConfigFromDelivery(
diff --git a/control-plane/pkg/core/config/utils_test.go b/control-plane/pkg/core/config/utils_test.go
index 9d4ff8d2ce..fcb1a55e26 100644
--- a/control-plane/pkg/core/config/utils_test.go
+++ b/control-plane/pkg/core/config/utils_test.go
@@ -23,21 +23,20 @@ import (
"testing"
"time"
+ corev1 "k8s.io/api/core/v1"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
- "google.golang.org/protobuf/encoding/protojson"
- eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
- "knative.dev/eventing/pkg/apis/feature"
- reconcilertesting "knative.dev/pkg/reconciler/testing"
-
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
+ "google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/runtime/protoimpl"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ "knative.dev/eventing/pkg/apis/feature"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/client/injection/ducks/duck/v1/addressable"
@@ -48,7 +47,6 @@ import (
"knative.dev/eventing-kafka-broker/control-plane/pkg/contract"
eventing "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1"
- eventpolicyinformerfake "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake"
)
func TestContentModeFromString(t *testing.T) {
@@ -513,7 +511,7 @@ func TestMergeEgressConfig(t *testing.T) {
}
}
-func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
+func TestContractEventPoliciesEventPolicies(t *testing.T) {
tests := []struct {
name string
@@ -522,7 +520,7 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
namespace string
defaultAuthorizationMode feature.Flag
expected []*contract.EventPolicy
- wantErr bool
+ oidcDisabled bool
}{
{
name: "Exact match",
@@ -539,6 +537,14 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
From: []string{
"from-1",
},
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
},
},
},
@@ -566,6 +572,14 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
From: []string{
"from-*",
},
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
},
},
},
@@ -594,6 +608,14 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
From: []string{
"from-1",
},
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
},
}, {
ObjectMeta: metav1.ObjectMeta{
@@ -604,6 +626,14 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
From: []string{
"from-2-*",
},
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
},
},
},
@@ -643,6 +673,14 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
From: []string{
"from-1",
},
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
},
}, {
ObjectMeta: metav1.ObjectMeta{
@@ -660,6 +698,14 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
From: []string{
"from-2-*",
},
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
},
},
},
@@ -728,46 +774,88 @@ func TestEventPoliciesFromAppliedEventPoliciesStatus(t *testing.T) {
defaultAuthorizationMode: feature.AuthorizationDenyAll,
expected: []*contract.EventPolicy{},
}, {
- name: "Applying policy does not exist",
+ name: "Applying policy not ready",
applyingPolicies: []string{
- "not-found",
+ "policy-1",
+ },
+ existingEventPolicies: []*eventingv1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "policy-1",
+ Namespace: "my-ns",
+ },
+ Status: eventingv1alpha1.EventPolicyStatus{
+ From: []string{
+ "from-*",
+ },
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionFalse,
+ },
+ },
+ },
+ },
+ },
+ },
+ namespace: "my-ns",
+ defaultAuthorizationMode: feature.AuthorizationDenyAll,
+ expected: []*contract.EventPolicy{},
+ }, {
+ name: "No policy when OIDC is disabled",
+ oidcDisabled: true,
+ applyingPolicies: []string{
+ "policy-1",
+ },
+ existingEventPolicies: []*eventingv1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "policy-1",
+ Namespace: "my-ns",
+ },
+ Status: eventingv1alpha1.EventPolicyStatus{
+ From: []string{
+ "from-1",
+ },
+ Status: duckv1.Status{
+ Conditions: duckv1.Conditions{
+ {
+ Type: eventingv1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionFalse, // is false, as OIDC is disabled
+ },
+ },
+ },
+ },
+ },
},
- existingEventPolicies: []*eventingv1alpha1.EventPolicy{},
namespace: "my-ns",
defaultAuthorizationMode: feature.AuthorizationAllowSameNamespace,
expected: []*contract.EventPolicy{},
- wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- ctx, _ := reconcilertesting.SetupFakeContext(t)
features := feature.Flags{
feature.AuthorizationDefaultMode: tt.defaultAuthorizationMode,
+ feature.OIDCAuthentication: feature.Enabled,
}
- for _, ep := range tt.existingEventPolicies {
- err := eventpolicyinformerfake.Get(ctx).Informer().GetStore().Add(ep)
- if err != nil {
- t.Fatal(err)
- }
+ if tt.oidcDisabled {
+ features[feature.OIDCAuthentication] = feature.Disabled
}
- applyingPoliciesStatus := eventingduck.AppliedEventPoliciesStatus{}
- for _, ep := range tt.applyingPolicies {
- applyingPoliciesStatus.Policies = append(applyingPoliciesStatus.Policies, eventingduck.AppliedEventPolicyRef{
- Name: ep,
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- })
- }
-
- got, err := EventPoliciesFromAppliedEventPoliciesStatus(applyingPoliciesStatus, eventpolicyinformerfake.Get(ctx).Lister(), tt.namespace, features)
- if (err != nil) != tt.wantErr {
- t.Errorf("EventPoliciesFromAppliedEventPoliciesStatus() error = %v, wantErr %v", err, tt.wantErr)
- return
+ applyingPolicies := []*eventingv1alpha1.EventPolicy{}
+ for _, applyingPolicyName := range tt.applyingPolicies {
+ for _, existingPolicy := range tt.existingEventPolicies {
+ if applyingPolicyName == existingPolicy.Name {
+ applyingPolicies = append(applyingPolicies, existingPolicy)
+ }
+ }
}
+ got := ContractEventPoliciesFromEventPolicies(applyingPolicies, tt.namespace, features)
expectedJSON, err := protojson.Marshal(&contract.Ingress{
EventPolicies: tt.expected,
})
diff --git a/control-plane/pkg/kafka/clientpool/clientpool.go b/control-plane/pkg/kafka/clientpool/clientpool.go
index f8261f2b2b..3df2d5bbec 100644
--- a/control-plane/pkg/kafka/clientpool/clientpool.go
+++ b/control-plane/pkg/kafka/clientpool/clientpool.go
@@ -27,10 +27,11 @@ import (
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
+ "knative.dev/pkg/logging"
+
"knative.dev/eventing-kafka-broker/control-plane/pkg/kafka"
"knative.dev/eventing-kafka-broker/control-plane/pkg/prober"
"knative.dev/eventing-kafka-broker/control-plane/pkg/security"
- "knative.dev/pkg/logging"
)
type KafkaClientKey struct{}
@@ -63,8 +64,21 @@ type ClientPool struct {
}
type GetKafkaClientFunc func(ctx context.Context, bootstrapServers []string, secret *corev1.Secret) (sarama.Client, error)
+
type GetKafkaClusterAdminFunc func(ctx context.Context, bootstrapServers []string, secret *corev1.Secret) (sarama.ClusterAdmin, error)
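+
+// DisabledGetKafkaClusterAdminFunc creates a Kafka cluster admin client directly, bypassing the client pool.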
+func DisabledGetKafkaClusterAdminFunc(_ context.Context, bootstrapServers []string, secret *corev1.Secret) (sarama.ClusterAdmin, error) {
+ c, err := makeSaramaClient(bootstrapServers, secret, sarama.NewClient)
+ if err != nil {
+ return nil, err
+ }
+ return sarama.NewClusterAdminFromClient(c)
+}
+
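+// DisabledGetClient creates a Kafka client directly, bypassing the client pool.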
+func DisabledGetClient(_ context.Context, bootstrapServers []string, secret *corev1.Secret) (sarama.Client, error) {
+ return makeSaramaClient(bootstrapServers, secret, sarama.NewClient)
+}
+
func (cp *ClientPool) GetClient(ctx context.Context, bootstrapServers []string, secret *corev1.Secret) (sarama.Client, error) {
client, err := cp.getClient(ctx, bootstrapServers, secret)
if err != nil {
@@ -141,7 +155,11 @@ func (cp *ClientPool) GetClusterAdmin(ctx context.Context, bootstrapServers []st
}
func Get(ctx context.Context) *ClientPool {
- return ctx.Value(ctxKey).(*ClientPool)
+ v := ctx.Value(ctxKey)
+ if v == nil {
+ return nil
+ }
+ return v.(*ClientPool)
}
func makeClusterAdminKey(bootstrapServers []string, secret *corev1.Secret) clientKey {
@@ -162,6 +180,10 @@ func makeClusterAdminKey(bootstrapServers []string, secret *corev1.Secret) clien
}
func (cp *ClientPool) makeSaramaClient(bootstrapServers []string, secret *corev1.Secret) (sarama.Client, error) {
+ return makeSaramaClient(bootstrapServers, secret, cp.newSaramaClient)
+}
+
+func makeSaramaClient(bootstrapServers []string, secret *corev1.Secret, newSaramaClient kafka.NewClientFunc) (sarama.Client, error) {
secretOpt, err := security.NewSaramaSecurityOptionFromSecret(secret)
if err != nil {
return nil, err
@@ -172,7 +194,7 @@ func (cp *ClientPool) makeSaramaClient(bootstrapServers []string, secret *corev1
return nil, err
}
- saramaClient, err := cp.newSaramaClient(bootstrapServers, config)
+ saramaClient, err := newSaramaClient(bootstrapServers, config)
if err != nil {
return nil, err
}
diff --git a/control-plane/pkg/reconciler/broker/broker.go b/control-plane/pkg/reconciler/broker/broker.go
index eaedd85863..8fa124f496 100644
--- a/control-plane/pkg/reconciler/broker/broker.go
+++ b/control-plane/pkg/reconciler/broker/broker.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
"k8s.io/utils/ptr"
@@ -191,13 +191,13 @@ func (r *Reconciler) reconcileKind(ctx context.Context, broker *eventing.Broker)
audience = nil
}
- err = auth.UpdateStatusWithEventPolicies(features, &broker.Status.AppliedEventPoliciesStatus, &broker.Status, r.EventPolicyLister, eventing.SchemeGroupVersion.WithKind("Broker"), broker.ObjectMeta)
+ applyingEventPolicies, err := auth.GetEventPoliciesForResource(r.EventPolicyLister, eventing.SchemeGroupVersion.WithKind("Broker"), broker.ObjectMeta)
if err != nil {
- return fmt.Errorf("could not update broker status with EventPolicies: %v", err)
+ return fmt.Errorf("could not get applying eventpolicies for broker: %v", err)
}
// Get resource configuration.
- brokerResource, err := r.reconcilerBrokerResource(ctx, topic, broker, secret, topicConfig, audience, broker.Status.AppliedEventPoliciesStatus)
+ brokerResource, err := r.reconcilerBrokerResource(ctx, topic, broker, secret, topicConfig, audience, applyingEventPolicies)
if err != nil {
return statusConditionManager.FailedToResolveConfig(err)
}
@@ -253,6 +253,11 @@ func (r *Reconciler) reconcileKind(ctx context.Context, broker *eventing.Broker)
logger.Debug("Updated dispatcher pod annotation")
}
+ err = auth.UpdateStatusWithProvidedEventPolicies(features, &broker.Status.AppliedEventPoliciesStatus, &broker.Status, applyingEventPolicies)
+ if err != nil {
+ return fmt.Errorf("could not update Broker status with EventPolicies: %v", err)
+ }
+
ingressHost := network.GetServiceHostname(r.Env.IngressName, r.DataPlaneNamespace)
var addressableStatus duckv1.AddressStatus
@@ -623,14 +628,15 @@ func rebuildCMFromStatusAnnotations(br *eventing.Broker) *corev1.ConfigMap {
return cm
}
-func (r *Reconciler) reconcilerBrokerResource(ctx context.Context, topic string, broker *eventing.Broker, secret *corev1.Secret, config *kafka.TopicConfig, audience *string, appliedEventPoliciesStatus eventingduck.AppliedEventPoliciesStatus) (*contract.Resource, error) {
+func (r *Reconciler) reconcilerBrokerResource(ctx context.Context, topic string, broker *eventing.Broker, secret *corev1.Secret, config *kafka.TopicConfig, audience *string, applyingEventPolicies []*eventingv1alpha1.EventPolicy) (*contract.Resource, error) {
features := feature.FromContext(ctx)
resource := &contract.Resource{
Uid: string(broker.UID),
Topics: []string{topic},
Ingress: &contract.Ingress{
- Path: receiver.PathFromObject(broker),
+ Path: receiver.PathFromObject(broker),
+ EventPolicies: coreconfig.ContractEventPoliciesFromEventPolicies(applyingEventPolicies, broker.Namespace, features),
},
FeatureFlags: &contract.FeatureFlags{
EnableEventTypeAutocreate: features.IsEnabled(feature.EvenTypeAutoCreate),
@@ -666,12 +672,6 @@ func (r *Reconciler) reconcilerBrokerResource(ctx context.Context, topic string,
}
resource.EgressConfig = egressConfig
- eventPolicies, err := coreconfig.EventPoliciesFromAppliedEventPoliciesStatus(appliedEventPoliciesStatus, r.EventPolicyLister, broker.Namespace, features)
- if err != nil {
- return nil, fmt.Errorf("could not get eventpolicies from broker status: %w", err)
- }
- resource.Ingress.EventPolicies = eventPolicies
-
return resource, nil
}
diff --git a/control-plane/pkg/reconciler/broker/broker_test.go b/control-plane/pkg/reconciler/broker/broker_test.go
index f8c720dfc2..19e6642e8e 100644
--- a/control-plane/pkg/reconciler/broker/broker_test.go
+++ b/control-plane/pkg/reconciler/broker/broker_test.go
@@ -1272,7 +1272,6 @@ func brokerReconciliation(t *testing.T, format string, env config.Env) {
StatusBrokerDataPlaneAvailable,
StatusBrokerConfigNotParsed("failed to resolve Spec.Delivery.DeadLetterSink: destination missing Ref and URI, expected at least one"),
StatusBrokerTopicReady,
- reconcilertesting.WithBrokerEventPoliciesReadyBecauseOIDCDisabled(),
BrokerConfigMapAnnotations(),
WithTopicStatusAnnotation(BrokerTopic()),
),
diff --git a/control-plane/pkg/reconciler/broker/controller.go b/control-plane/pkg/reconciler/broker/controller.go
index e7283ca358..c8d913a107 100644
--- a/control-plane/pkg/reconciler/broker/controller.go
+++ b/control-plane/pkg/reconciler/broker/controller.go
@@ -69,8 +69,6 @@ func NewController(ctx context.Context, watcher configmap.Watcher, env *config.E
eventPolicyInformer := eventpolicyinformer.Get(ctx)
featureFlags := apisconfig.DefaultFeaturesConfig()
- clientPool := clientpool.Get(ctx)
-
reconciler := &Reconciler{
Reconciler: &base.Reconciler{
KubeClient: kubeclient.Get(ctx),
@@ -83,12 +81,18 @@ func NewController(ctx context.Context, watcher configmap.Watcher, env *config.E
DispatcherLabel: base.BrokerDispatcherLabel,
ReceiverLabel: base.BrokerReceiverLabel,
},
- GetKafkaClusterAdmin: clientPool.GetClusterAdmin,
- ConfigMapLister: configmapInformer.Lister(),
- EventPolicyLister: eventPolicyInformer.Lister(),
- Env: env,
- Counter: counter.NewExpiringCounter(ctx),
- KafkaFeatureFlags: featureFlags,
+ ConfigMapLister: configmapInformer.Lister(),
+ EventPolicyLister: eventPolicyInformer.Lister(),
+ Env: env,
+ Counter: counter.NewExpiringCounter(ctx),
+ KafkaFeatureFlags: featureFlags,
+ }
+
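+ // clientpool.Get returns nil when the client pool is disabled, so fall back to creating cluster admin clients directly.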
+ clientPool := clientpool.Get(ctx)
+ if clientPool == nil {
+ reconciler.GetKafkaClusterAdmin = clientpool.DisabledGetKafkaClusterAdminFunc
+ } else {
+ reconciler.GetKafkaClusterAdmin = clientPool.GetClusterAdmin
}
logger := logging.FromContext(ctx)
diff --git a/control-plane/pkg/reconciler/broker/namespaced_controller.go b/control-plane/pkg/reconciler/broker/namespaced_controller.go
index eed5bdc508..871f5139a2 100644
--- a/control-plane/pkg/reconciler/broker/namespaced_controller.go
+++ b/control-plane/pkg/reconciler/broker/namespaced_controller.go
@@ -92,8 +92,6 @@ func NewNamespacedController(ctx context.Context, watcher configmap.Watcher, env
logger.Fatal("unable to create Manifestival client-go client", zap.Error(err))
}
- clientPool := clientpool.Get(ctx)
-
reconciler := &NamespacedReconciler{
Reconciler: &base.Reconciler{
KubeClient: kubeclient.Get(ctx),
@@ -107,7 +105,6 @@ func NewNamespacedController(ctx context.Context, watcher configmap.Watcher, env
DispatcherLabel: base.BrokerDispatcherLabel,
ReceiverLabel: base.BrokerReceiverLabel,
},
- GetKafkaClusterAdmin: clientPool.GetClusterAdmin,
NamespaceLister: namespaceinformer.Get(ctx).Lister(),
ConfigMapLister: configmapInformer.Lister(),
ServiceAccountLister: serviceaccountinformer.Get(ctx).Lister(),
@@ -124,6 +121,13 @@ func NewNamespacedController(ctx context.Context, watcher configmap.Watcher, env
KafkaFeatureFlags: apisconfig.DefaultFeaturesConfig(),
}
+ clientPool := clientpool.Get(ctx)
+ if clientPool == nil {
+ reconciler.GetKafkaClusterAdmin = clientpool.DisabledGetKafkaClusterAdminFunc
+ } else {
+ reconciler.GetKafkaClusterAdmin = clientPool.GetClusterAdmin
+ }
+
impl := brokerreconciler.NewImpl(ctx, reconciler, kafka.NamespacedBrokerClass, func(impl *controller.Impl) controller.Options {
return controller.Options{PromoteFilterFunc: kafka.NamespacedBrokerClassFilter()}
})
diff --git a/control-plane/pkg/reconciler/channel/channel.go b/control-plane/pkg/reconciler/channel/channel.go
index 3222d11b7d..e639d553fd 100644
--- a/control-plane/pkg/reconciler/channel/channel.go
+++ b/control-plane/pkg/reconciler/channel/channel.go
@@ -24,6 +24,8 @@ import (
"strings"
"time"
+ "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+
eventingv1alpha1listers "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
"k8s.io/utils/pointer"
@@ -40,13 +42,14 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
corelisters "k8s.io/client-go/listers/core/v1"
- "knative.dev/eventing-kafka-broker/control-plane/pkg/kafka/clientpool"
- "knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/channel/resources"
"knative.dev/eventing/pkg/apis/feature"
"knative.dev/pkg/network"
"knative.dev/pkg/resolver"
"knative.dev/pkg/system"
+ "knative.dev/eventing-kafka-broker/control-plane/pkg/kafka/clientpool"
+ "knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/channel/resources"
+
v1 "knative.dev/eventing/pkg/apis/duck/v1"
messaging "knative.dev/eventing/pkg/apis/messaging/v1"
"knative.dev/pkg/apis"
@@ -232,8 +235,13 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, channel *messagingv1beta
audience = nil
}
+ applyingEventPolicies, err := auth.GetEventPoliciesForResource(r.EventPolicyLister, messagingv1beta1.SchemeGroupVersion.WithKind("KafkaChannel"), channel.ObjectMeta)
+ if err != nil {
+ return fmt.Errorf("could not get applying eventpolicies for kafkaChannel: %v", err)
+ }
+
// Get resource configuration
- channelResource, err := r.getChannelContractResource(ctx, topic, channel, authContext, topicConfig, audience, channel.Status.AppliedEventPoliciesStatus)
+ channelResource, err := r.getChannelContractResource(ctx, topic, channel, authContext, topicConfig, audience, applyingEventPolicies)
if err != nil {
return statusConditionManager.FailedToResolveConfig(err)
}
@@ -263,6 +271,11 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, channel *messagingv1beta
logger.Debug("Contract config map updated")
statusConditionManager.ConfigMapUpdated()
+ err = auth.UpdateStatusWithProvidedEventPolicies(featureFlags, &channel.Status.AppliedEventPoliciesStatus, &channel.Status, applyingEventPolicies)
+ if err != nil {
+ return fmt.Errorf("could not update KafkaChannel status with EventPolicies: %v", err)
+ }
+
// We update receiver pods annotation regardless of our contract changed or not due to the fact
// that in a previous reconciliation we might have failed to update one of our data plane pod annotation, so we want
// to anyway update remaining annotations with the contract generation that was saved in the CM.
@@ -590,6 +603,13 @@ func (r *Reconciler) reconcileConsumerGroup(ctx context.Context, channel *messag
},
},
Spec: internalscg.ConsumerGroupSpec{
+ TopLevelResourceRef: &corev1.ObjectReference{
+ APIVersion: messagingv1beta1.SchemeGroupVersion.String(),
+ Kind: "KafkaChannel",
+ Name: channel.Name,
+ Namespace: channel.Namespace,
+ UID: channel.UID,
+ },
Template: internalscg.ConsumerTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
@@ -688,15 +708,16 @@ func (r *Reconciler) reconcileConsumerGroup(ctx context.Context, channel *messag
return cg, nil
}
-func (r *Reconciler) getChannelContractResource(ctx context.Context, topic string, channel *messagingv1beta1.KafkaChannel, auth *security.NetSpecAuthContext, config *kafka.TopicConfig, audience *string, appliedEventPoliciesStatus v1.AppliedEventPoliciesStatus) (*contract.Resource, error) {
+func (r *Reconciler) getChannelContractResource(ctx context.Context, topic string, channel *messagingv1beta1.KafkaChannel, auth *security.NetSpecAuthContext, config *kafka.TopicConfig, audience *string, applyingEventPolicies []*v1alpha1.EventPolicy) (*contract.Resource, error) {
features := feature.FromContext(ctx)
resource := &contract.Resource{
Uid: string(channel.UID),
Topics: []string{topic},
Ingress: &contract.Ingress{
- Host: receiver.Host(channel.GetNamespace(), channel.GetName()),
- Path: receiver.Path(channel.GetNamespace(), channel.GetName()),
+ Host: receiver.Host(channel.GetNamespace(), channel.GetName()),
+ Path: receiver.Path(channel.GetNamespace(), channel.GetName()),
+ EventPolicies: coreconfig.ContractEventPoliciesFromEventPolicies(applyingEventPolicies, channel.Namespace, features),
},
FeatureFlags: &contract.FeatureFlags{
EnableEventTypeAutocreate: features.IsEnabled(feature.EvenTypeAutoCreate) && !ownedByBroker(channel),
@@ -721,12 +742,6 @@ func (r *Reconciler) getChannelContractResource(ctx context.Context, topic strin
resource.Ingress.Audience = *audience
}
- eventPolicies, err := coreconfig.EventPoliciesFromAppliedEventPoliciesStatus(appliedEventPoliciesStatus, r.EventPolicyLister, channel.Namespace, features)
- if err != nil {
- return nil, fmt.Errorf("could not get eventpolicies from channel status: %w", err)
- }
- resource.Ingress.EventPolicies = eventPolicies
-
egressConfig, err := coreconfig.EgressConfigFromDelivery(ctx, r.Resolver, channel, channel.Spec.Delivery, r.DefaultBackoffDelayMs)
if err != nil {
return nil, err
diff --git a/control-plane/pkg/reconciler/channel/channel_test.go b/control-plane/pkg/reconciler/channel/channel_test.go
index 7b5bf54399..5ec78fd5b9 100644
--- a/control-plane/pkg/reconciler/channel/channel_test.go
+++ b/control-plane/pkg/reconciler/channel/channel_test.go
@@ -62,10 +62,13 @@ import (
messagingv1beta1kafkachannelreconciler "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel"
"github.com/rickb777/date/period"
+
+ eventingrekttesting "knative.dev/eventing/pkg/reconciler/testing/v1"
+ reconcilertesting "knative.dev/eventing/pkg/reconciler/testing/v1"
+
internalscg "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1"
kafkainternals "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1"
fakeconsumergroupinformer "knative.dev/eventing-kafka-broker/control-plane/pkg/client/injection/client/fake"
- eventingrekttesting "knative.dev/eventing/pkg/reconciler/testing/v1"
)
const (
@@ -76,6 +79,9 @@ const (
TestExpectedReplicationFactor = "TestExpectedReplicationFactor"
TestExpectedRetentionDuration = "TestExpectedRetentionDuration"
+ readyEventPolicyName = "test-event-policy-ready"
+ unreadyEventPolicyName = "test-event-policy-unready"
+
kafkaFeatureFlags = "kafka-feature-flags"
)
@@ -98,6 +104,12 @@ var DefaultEnv = &config.Env{
var (
testCaCerts = string(eventingtlstesting.CA)
+
+ channelGVK = metav1.GroupVersionKind{
+ Group: "messaging.knative.dev",
+ Version: "v1beta1",
+ Kind: "KafkaChannel",
+ }
)
func TestReconcileKind(t *testing.T) {
@@ -240,6 +252,7 @@ func TestReconcileKind(t *testing.T) {
ChannelAddressable(&env),
StatusProbeSucceeded,
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -315,6 +328,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
StatusChannelSubscribers(),
WithChannelDeadLetterSinkURI(ServiceURL),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -367,6 +381,7 @@ func TestReconcileKind(t *testing.T) {
StatusTopicReadyWithName(ChannelTopic()),
StatusProbeFailed(prober.StatusNotReady),
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -422,6 +437,7 @@ func TestReconcileKind(t *testing.T) {
StatusTopicReadyWithName(ChannelTopic()),
StatusProbeFailed(prober.StatusUnknown),
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -464,6 +480,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription1URI)),
ConsumerReply(ConsumerUrlReply(apis.HTTP(Subscription1ReplyURI))),
)),
+ withChannelTopLevelResourceRef(),
),
},
WantUpdates: []clientgotesting.UpdateActionImpl{
@@ -497,6 +514,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
WithSubscribers(Subscriber1(WithUnknownSubscriber)),
StatusChannelSubscribersUnknown(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -536,6 +554,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription1URI)),
ConsumerReply(ConsumerUrlReply(apis.HTTP(Subscription1ReplyURI))),
)),
+ withChannelTopLevelResourceRef(),
),
},
WantUpdates: []clientgotesting.UpdateActionImpl{
@@ -570,6 +589,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
WithSubscribers(Subscriber1(WithUnknownSubscriber)),
StatusChannelSubscribersUnknown(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -609,6 +629,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription1URI)),
ConsumerReply(ConsumerUrlReply(apis.HTTP(Subscription1ReplyURI))),
)),
+ withChannelTopLevelResourceRef(),
),
},
WantUpdates: []clientgotesting.UpdateActionImpl{
@@ -642,6 +663,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
WithSubscribers(Subscriber1(WithUnknownSubscriber)),
StatusChannelSubscribersUnknown(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -667,6 +689,7 @@ func TestReconcileKind(t *testing.T) {
WithConsumerGroupOwnerRef(kmeta.NewControllerRef(NewChannel())),
WithConsumerGroupMetaLabels(OwnerAsChannelLabel),
ConsumerGroupReady,
+ withChannelTopLevelResourceRef(),
),
},
Key: testKey,
@@ -708,6 +731,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerReply(ConsumerUrlReply(apis.HTTP(Subscription1ReplyURI))),
)),
ConsumerGroupReady,
+ withChannelTopLevelResourceRef(),
),
},
},
@@ -724,6 +748,7 @@ func TestReconcileKind(t *testing.T) {
WithSubscribers(Subscriber1(WithFreshSubscriber)),
StatusChannelSubscribers(),
StatusProbeSucceeded,
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -761,6 +786,7 @@ func TestReconcileKind(t *testing.T) {
)),
ConsumerGroupReplicas(1),
WithConsumerGroupFailed("failed to reconcile consumer group,", "internal error"),
+ withChannelTopLevelResourceRef(),
),
},
Key: testKey,
@@ -779,6 +805,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
WithSubscribers(Subscriber1(WithUnreadySubscriber)),
StatusChannelSubscribersUnknown(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -839,6 +866,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription1URI)),
ConsumerReply(ConsumerUrlReply(apis.HTTP(Subscription1ReplyURI))),
)),
+ withChannelTopLevelResourceRef(),
),
NewConsumerGroup(
WithConsumerGroupName(Subscription2UUID),
@@ -856,6 +884,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription2URI)),
ConsumerReply(ConsumerNoReply()),
)),
+ withChannelTopLevelResourceRef(),
),
},
WantUpdates: []clientgotesting.UpdateActionImpl{
@@ -890,6 +919,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
WithSubscribers(Subscriber1(WithUnknownSubscriber), Subscriber2(WithUnknownSubscriber)),
StatusChannelSubscribersUnknown(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -926,6 +956,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerDelivery(NewConsumerSpecDelivery(kafkasource.Ordered)),
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription2URI)),
)),
+ withChannelTopLevelResourceRef(),
),
},
Key: testKey,
@@ -947,6 +978,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription1URI)),
ConsumerReply(ConsumerUrlReply(apis.HTTP(Subscription1ReplyURI))),
)),
+ withChannelTopLevelResourceRef(),
),
},
WantUpdates: []clientgotesting.UpdateActionImpl{
@@ -980,6 +1012,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
WithSubscribers(Subscriber1(WithUnknownSubscriber)),
StatusChannelSubscribersUnknown(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1148,6 +1181,7 @@ func TestReconcileKind(t *testing.T) {
ChannelAddressable(&env),
StatusProbeSucceeded,
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1196,6 +1230,7 @@ func TestReconcileKind(t *testing.T) {
)),
ConsumerGroupReplicas(1),
ConsumerGroupReady,
+ withChannelTopLevelResourceRef(),
),
},
Key: testKey,
@@ -1251,6 +1286,7 @@ func TestReconcileKind(t *testing.T) {
WithSubscribers(Subscriber1(WithFreshSubscriber)),
StatusProbeSucceeded,
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1301,6 +1337,7 @@ func TestReconcileKind(t *testing.T) {
)),
ConsumerGroupReplicas(1),
ConsumerGroupReady,
+ withChannelTopLevelResourceRef(),
),
},
Key: testKey,
@@ -1356,6 +1393,7 @@ func TestReconcileKind(t *testing.T) {
WithSubscribers(Subscriber1(WithFreshSubscriber)),
StatusProbeSucceeded,
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1405,6 +1443,7 @@ func TestReconcileKind(t *testing.T) {
)),
ConsumerGroupReplicas(1),
ConsumerGroupReady,
+ withChannelTopLevelResourceRef(),
),
},
Key: testKey,
@@ -1457,6 +1496,7 @@ func TestReconcileKind(t *testing.T) {
WithSubscribers(Subscriber1(WithFreshSubscriber)),
StatusProbeSucceeded,
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1503,6 +1543,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerSubscriber(NewConsumerSpecSubscriber(Subscription1URI)),
ConsumerReply(ConsumerUrlReply(apis.HTTP(Subscription1ReplyURI))),
)),
+ withChannelTopLevelResourceRef(),
),
},
WantUpdates: []clientgotesting.UpdateActionImpl{
@@ -1536,6 +1577,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
WithSubscribers(Subscriber1(WithUnknownSubscriber)),
StatusChannelSubscribersUnknown(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1589,6 +1631,7 @@ func TestReconcileKind(t *testing.T) {
ChannelAddressable(&env),
StatusProbeSucceeded,
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1641,6 +1684,7 @@ func TestReconcileKind(t *testing.T) {
ChannelAddressable(&env),
StatusProbeSucceeded,
StatusChannelSubscribers(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1754,6 +1798,7 @@ func TestReconcileKind(t *testing.T) {
StatusProbeSucceeded,
StatusChannelSubscribers(),
WithChannelDeadLetterSinkURI(ServiceURL),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1856,6 +1901,7 @@ func TestReconcileKind(t *testing.T) {
URL: ChannelAddress(),
}),
WithChannelAddessable(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1948,6 +1994,7 @@ func TestReconcileKind(t *testing.T) {
CACerts: pointer.String(testCaCerts),
}),
WithChannelAddessable(),
+ WithChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1973,7 +2020,8 @@ func TestReconcileKind(t *testing.T) {
},
Key: testKey,
Ctx: feature.ToContext(context.Background(), feature.Flags{
- feature.OIDCAuthentication: feature.Enabled,
+ feature.OIDCAuthentication: feature.Enabled,
+ feature.AuthorizationDefaultMode: feature.AuthorizationDenyAll,
}),
WantUpdates: []clientgotesting.UpdateActionImpl{
ConfigMapUpdate(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, env.ContractConfigMapFormat, &contract.Contract{
@@ -2027,6 +2075,212 @@ func TestReconcileKind(t *testing.T) {
Audience: pointer.String(ChannelAudience),
}),
WithChannelAddessable(),
+ WithChannelEventPoliciesReadyBecauseNoPolicyAndOIDCEnabled(feature.AuthorizationDenyAll),
+ ),
+ },
+ },
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchFinalizers(),
+ },
+ WantEvents: []string{
+ finalizerUpdatedEvent,
+ },
+ },
+ {
+ Name: "Should list applying EventPolicies",
+ Objects: []runtime.Object{
+ NewChannel(),
+ NewConfigMapWithTextData(env.SystemNamespace, DefaultEnv.GeneralConfigMapName, map[string]string{
+ kafka.BootstrapServersConfigMapKey: ChannelBootstrapServers,
+ }),
+ ChannelReceiverPod(env.SystemNamespace, map[string]string{
+ base.VolumeGenerationAnnotationKey: "0",
+ "annotation_to_preserve": "value_to_preserve",
+ }),
+ reconcilertesting.NewEventPolicy(readyEventPolicyName, ChannelNamespace,
+ reconcilertesting.WithReadyEventPolicyCondition,
+ reconcilertesting.WithEventPolicyToRef(channelGVK, ChannelName),
+ reconcilertesting.WithEventPolicyStatusFromSub([]string{
+ "sub",
+ }),
+ ),
+ },
+ Key: testKey,
+ Ctx: feature.ToContext(context.Background(), feature.Flags{
+ feature.OIDCAuthentication: feature.Enabled,
+ feature.AuthorizationDefaultMode: feature.AuthorizationAllowSameNamespace,
+ }),
+ WantUpdates: []clientgotesting.UpdateActionImpl{
+ ConfigMapUpdate(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, env.ContractConfigMapFormat, &contract.Contract{
+ Generation: 1,
+ Resources: []*contract.Resource{
+ {
+ Uid: ChannelUUID,
+ Topics: []string{ChannelTopic()},
+ BootstrapServers: ChannelBootstrapServers,
+ Reference: ChannelReference(),
+ Ingress: &contract.Ingress{
+ Host: receiver.Host(ChannelNamespace, ChannelName),
+ Path: receiver.Path(ChannelNamespace, ChannelName),
+ Audience: ChannelAudience,
+ EventPolicies: []*contract.EventPolicy{
+ {
+ TokenMatchers: []*contract.TokenMatcher{
+ {
+ Matcher: &contract.TokenMatcher_Exact{
+ Exact: &contract.Exact{
+ Attributes: map[string]string{
+ "sub": "sub",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ FeatureFlags: FeatureFlagsETAutocreate(false),
+ },
+ },
+ }),
+ ChannelReceiverPodUpdate(env.SystemNamespace, map[string]string{
+ "annotation_to_preserve": "value_to_preserve",
+ base.VolumeGenerationAnnotationKey: "1",
+ }),
+ },
+ SkipNamespaceValidation: true, // WantCreates compare the channel namespace with configmap namespace, so skip it
+ WantCreates: []runtime.Object{
+ NewConfigMapWithBinaryData(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, nil),
+ NewPerChannelService(&env),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{
+ {
+ Object: NewChannel(
+ WithInitKafkaChannelConditions,
+ StatusConfigParsed,
+ StatusConfigMapUpdatedReady(&env),
+ WithChannelTopicStatusAnnotation(ChannelTopic()),
+ StatusTopicReadyWithName(ChannelTopic()),
+ ChannelAddressable(&env),
+ StatusProbeSucceeded,
+ StatusChannelSubscribers(),
+ WithChannelAddresses([]duckv1.Addressable{
+ {
+ Name: pointer.String("http"),
+ URL: ChannelAddress(),
+ Audience: pointer.String(ChannelAudience),
+ },
+ }),
+ WithChannelAddress(duckv1.Addressable{
+ Name: pointer.String("http"),
+ URL: ChannelAddress(),
+ Audience: pointer.String(ChannelAudience),
+ }),
+ WithChannelAddessable(),
+ WithChannelEventPoliciesReady(),
+ WithChannelEventPoliciesListed(readyEventPolicyName),
+ ),
+ },
+ },
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchFinalizers(),
+ },
+ WantEvents: []string{
+ finalizerUpdatedEvent,
+ },
+ }, {
+ Name: "Should mark as NotReady on unready EventPolicies",
+ Objects: []runtime.Object{
+ NewChannel(),
+ NewConfigMapWithTextData(env.SystemNamespace, DefaultEnv.GeneralConfigMapName, map[string]string{
+ kafka.BootstrapServersConfigMapKey: ChannelBootstrapServers,
+ }),
+ ChannelReceiverPod(env.SystemNamespace, map[string]string{
+ base.VolumeGenerationAnnotationKey: "0",
+ "annotation_to_preserve": "value_to_preserve",
+ }),
+ reconcilertesting.NewEventPolicy(unreadyEventPolicyName, ChannelNamespace,
+ reconcilertesting.WithUnreadyEventPolicyCondition("", ""),
+ reconcilertesting.WithEventPolicyToRef(channelGVK, ChannelName),
+ reconcilertesting.WithEventPolicyStatusFromSub([]string{
+ "sub",
+ }),
+ ),
+ },
+ Key: testKey,
+ Ctx: feature.ToContext(context.Background(), feature.Flags{
+ feature.OIDCAuthentication: feature.Enabled,
+ feature.AuthorizationDefaultMode: feature.AuthorizationAllowSameNamespace,
+ }),
+ WantUpdates: []clientgotesting.UpdateActionImpl{
+ ConfigMapUpdate(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, env.ContractConfigMapFormat, &contract.Contract{
+ Generation: 1,
+ Resources: []*contract.Resource{
+ {
+ Uid: ChannelUUID,
+ Topics: []string{ChannelTopic()},
+ BootstrapServers: ChannelBootstrapServers,
+ Reference: ChannelReference(),
+ Ingress: &contract.Ingress{
+ Host: receiver.Host(ChannelNamespace, ChannelName),
+ Path: receiver.Path(ChannelNamespace, ChannelName),
+ Audience: ChannelAudience,
+ EventPolicies: []*contract.EventPolicy{
+ {
+ TokenMatchers: []*contract.TokenMatcher{
+ {
+ Matcher: &contract.TokenMatcher_Prefix{
+ Prefix: &contract.Prefix{
+ Attributes: map[string]string{
+ "sub": "system:serviceaccount:" + ChannelNamespace + ":",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ FeatureFlags: FeatureFlagsETAutocreate(false),
+ },
+ },
+ }),
+ ChannelReceiverPodUpdate(env.SystemNamespace, map[string]string{
+ "annotation_to_preserve": "value_to_preserve",
+ base.VolumeGenerationAnnotationKey: "1",
+ }),
+ },
+ SkipNamespaceValidation: true, // WantCreates compare the channel namespace with configmap namespace, so skip it
+ WantCreates: []runtime.Object{
+ NewConfigMapWithBinaryData(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, nil),
+ NewPerChannelService(&env),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{
+ {
+ Object: NewChannel(
+ WithInitKafkaChannelConditions,
+ StatusConfigParsed,
+ StatusConfigMapUpdatedReady(&env),
+ WithChannelTopicStatusAnnotation(ChannelTopic()),
+ StatusTopicReadyWithName(ChannelTopic()),
+ ChannelAddressable(&env),
+ StatusProbeSucceeded,
+ StatusChannelSubscribers(),
+ WithChannelAddresses([]duckv1.Addressable{
+ {
+ Name: pointer.String("http"),
+ URL: ChannelAddress(),
+ Audience: pointer.String(ChannelAudience),
+ },
+ }),
+ WithChannelAddress(duckv1.Addressable{
+ Name: pointer.String("http"),
+ URL: ChannelAddress(),
+ Audience: pointer.String(ChannelAudience),
+ }),
+ WithChannelAddessable(),
+ WithChannelEventPoliciesNotReady("EventPoliciesNotReady", fmt.Sprintf("event policies %s are not ready", unreadyEventPolicyName)),
),
},
},
@@ -2192,3 +2446,13 @@ func httpsURL(name string, namespace string) *apis.URL {
Path: fmt.Sprintf("/%s/%s", namespace, name),
}
}
+
+func withChannelTopLevelResourceRef() ConsumerGroupOption {
+ return WithTopLevelResourceRef(&corev1.ObjectReference{
+ APIVersion: messagingv1beta.SchemeGroupVersion.String(),
+ Kind: "KafkaChannel",
+ Namespace: ChannelNamespace,
+ Name: ChannelName,
+ UID: ChannelUUID,
+ })
+}
diff --git a/control-plane/pkg/reconciler/channel/controller.go b/control-plane/pkg/reconciler/channel/controller.go
index 739a271180..b41e333eb5 100644
--- a/control-plane/pkg/reconciler/channel/controller.go
+++ b/control-plane/pkg/reconciler/channel/controller.go
@@ -55,6 +55,8 @@ import (
"knative.dev/pkg/controller"
+ "knative.dev/eventing/pkg/auth"
+
apisconfig "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/config"
"knative.dev/eventing-kafka-broker/control-plane/pkg/config"
"knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/base"
@@ -70,8 +72,6 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
messagingv1beta.RegisterAlternateKafkaChannelConditionSet(conditionSet)
- clientPool := clientpool.Get(ctx)
-
reconciler := &Reconciler{
Reconciler: &base.Reconciler{
KubeClient: kubeclient.Get(ctx),
@@ -83,15 +83,21 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
DataPlaneNamespace: configs.SystemNamespace,
ReceiverLabel: base.ChannelReceiverLabel,
},
- GetKafkaClusterAdmin: clientPool.GetClusterAdmin,
- Env: configs,
- ConfigMapLister: configmapInformer.Lister(),
- ServiceLister: serviceinformer.Get(ctx).Lister(),
- SubscriptionLister: subscriptioninformer.Get(ctx).Lister(),
- ConsumerGroupLister: consumerGroupInformer.Lister(),
- EventPolicyLister: eventPolicyInformer.Lister(),
- InternalsClient: consumergroupclient.Get(ctx),
- KafkaFeatureFlags: apisconfig.DefaultFeaturesConfig(),
+ Env: configs,
+ ConfigMapLister: configmapInformer.Lister(),
+ ServiceLister: serviceinformer.Get(ctx).Lister(),
+ SubscriptionLister: subscriptioninformer.Get(ctx).Lister(),
+ ConsumerGroupLister: consumerGroupInformer.Lister(),
+ EventPolicyLister: eventPolicyInformer.Lister(),
+ InternalsClient: consumergroupclient.Get(ctx),
+ KafkaFeatureFlags: apisconfig.DefaultFeaturesConfig(),
+ }
+
+ clientPool := clientpool.Get(ctx)
+ if clientPool == nil {
+ reconciler.GetKafkaClusterAdmin = clientpool.DisabledGetKafkaClusterAdminFunc
+ } else {
+ reconciler.GetKafkaClusterAdmin = clientPool.GetClusterAdmin
}
logger := logging.FromContext(ctx)
@@ -123,6 +129,10 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
}
})
+ globalResync = func(obj interface{}) {
+ impl.GlobalResync(channelInformer.Informer())
+ }
+
kafkaConfigStore := apisconfig.NewStore(ctx, func(name string, value *apisconfig.KafkaFeatureFlags) {
reconciler.KafkaFeatureFlags.Reset(value)
impl.GlobalResync(channelInformer.Informer())
@@ -177,5 +187,9 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
Handler: controller.HandleAll(consumergroup.Enqueue("kafkachannel", impl.EnqueueKey)),
})
+ channelGK := messagingv1beta.SchemeGroupVersion.WithKind("KafkaChannel").GroupKind()
+ // Enqueue the KafkaChannel if we have an EventPolicy which was referencing
+ // or got updated and now is referencing the KafkaChannel.
+ eventPolicyInformer.Informer().AddEventHandler(auth.EventPolicyEventHandler(channelInformer.Informer().GetIndexer(), channelGK, impl.EnqueueKey))
return impl
}
diff --git a/control-plane/pkg/reconciler/consumer/consumer.go b/control-plane/pkg/reconciler/consumer/consumer.go
index a3ea77aa19..40eaa33ede 100644
--- a/control-plane/pkg/reconciler/consumer/consumer.go
+++ b/control-plane/pkg/reconciler/consumer/consumer.go
@@ -131,6 +131,14 @@ func (r *Reconciler) reconcileContractResource(ctx context.Context, c *kafkainte
egress.VReplicas = 1
}
+ topLevelUserFacingResourceRef, err := r.reconcileTopLevelUserFacingResourceRef(c)
+ if err != nil {
+ return nil, fmt.Errorf("failed to reconcile top-level user facing resource reference: %w", err)
+ }
+ if topLevelUserFacingResourceRef == nil {
+ topLevelUserFacingResourceRef = userFacingResourceRef
+ }
+
resource := &contract.Resource{
Uid: string(c.UID),
Topics: c.Spec.Topics,
@@ -138,7 +146,7 @@ func (r *Reconciler) reconcileContractResource(ctx context.Context, c *kafkainte
Egresses: []*contract.Egress{egress},
Auth: nil, // Auth will be added by reconcileAuth
CloudEventOverrides: reconcileCEOverrides(c),
- Reference: userFacingResourceRef,
+ Reference: topLevelUserFacingResourceRef,
FeatureFlags: &contract.FeatureFlags{
EnableEventTypeAutocreate: feature.FromContext(ctx).IsEnabled(feature.EvenTypeAutoCreate),
},
@@ -303,6 +311,31 @@ func (r *Reconciler) reconcileUserFacingResourceRef(c *kafkainternals.Consumer)
return ref, nil
}
+func (r *Reconciler) reconcileTopLevelUserFacingResourceRef(c *kafkainternals.Consumer) (*contract.Reference, error) {
+
+ cg, err := r.ConsumerGroupLister.ConsumerGroups(c.GetNamespace()).Get(c.GetConsumerGroup().Name)
+ if apierrors.IsNotFound(err) {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to get %s: %w", kafkainternals.ConsumerGroupGroupVersionKind.Kind, err)
+ }
+
+ userFacingResource := cg.GetTopLevelUserFacingResourceRef()
+ if userFacingResource == nil {
+ return nil, nil
+ }
+
+ ref := &contract.Reference{
+ Uuid: string(userFacingResource.UID),
+ Namespace: c.GetNamespace(),
+ Name: userFacingResource.Name,
+ Kind: userFacingResource.Kind,
+ GroupVersion: userFacingResource.APIVersion,
+ }
+ return ref, nil
+}
+
func reconcileDeliveryOrder(c *kafkainternals.Consumer) contract.DeliveryOrder {
if c.Spec.Delivery == nil {
return contract.DeliveryOrder_UNORDERED
diff --git a/control-plane/pkg/reconciler/consumergroup/consumergroup.go b/control-plane/pkg/reconciler/consumergroup/consumergroup.go
index f5d6dad34f..19baf9672d 100644
--- a/control-plane/pkg/reconciler/consumergroup/consumergroup.go
+++ b/control-plane/pkg/reconciler/consumergroup/consumergroup.go
@@ -452,7 +452,7 @@ func (r *Reconciler) schedule(ctx context.Context, cg *kafkainternals.ConsumerGr
return cg.MarkScheduleConsumerFailed("Schedule", err)
}
- placements, err := statefulSetScheduler.Schedule(cg)
+ placements, err := statefulSetScheduler.Schedule(ctx, cg)
if err != nil {
return cg.MarkScheduleConsumerFailed("Schedule", err)
}
@@ -769,7 +769,10 @@ func (r *Reconciler) reconcileSecret(ctx context.Context, expectedSecret *corev1
}
func (r *Reconciler) ensureContractConfigmapsExist(ctx context.Context, scheduler Scheduler) error {
- selector := labels.SelectorFromSet(map[string]string{"app": scheduler.StatefulSetName})
+ selector := labels.SelectorFromSet(map[string]string{
+ "app": scheduler.StatefulSetName,
+ "app.kubernetes.io/kind": "kafka-dispatcher",
+ })
pods, err := r.PodLister.
Pods(r.SystemNamespace).
List(selector)
diff --git a/control-plane/pkg/reconciler/consumergroup/consumergroup_test.go b/control-plane/pkg/reconciler/consumergroup/consumergroup_test.go
index e0f2aaeb54..49e959c2ae 100644
--- a/control-plane/pkg/reconciler/consumergroup/consumergroup_test.go
+++ b/control-plane/pkg/reconciler/consumergroup/consumergroup_test.go
@@ -61,10 +61,10 @@ import (
kedaclient "knative.dev/eventing-kafka-broker/third_party/pkg/client/injection/client/fake"
)
-type SchedulerFunc func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error)
+type SchedulerFunc func(ctx context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error)
-func (f SchedulerFunc) Schedule(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
- return f(vpod)
+func (f SchedulerFunc) Schedule(ctx context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ return f(ctx, vpod)
}
const (
@@ -102,7 +102,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -173,8 +173,8 @@ func TestReconcileKind(t *testing.T) {
Name: "Consumers in multiple pods, with pods pending and unknown phase",
Objects: []runtime.Object{
NewService(),
- NewDispatcherPod("p1", PodLabel(kafkainternals.SourceStatefulSetName), PodPending()),
- NewDispatcherPod("p2", PodLabel(kafkainternals.SourceStatefulSetName)),
+ NewDispatcherPod("p1", PodLabel("app", kafkainternals.SourceStatefulSetName), DispatcherLabel(), PodPending()),
+ NewDispatcherPod("p2", PodLabel("app", kafkainternals.SourceStatefulSetName), DispatcherLabel()),
NewConsumerGroup(
ConsumerGroupConsumerSpec(NewConsumerSpec(
ConsumerTopics("t1", "t2"),
@@ -189,7 +189,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -307,7 +307,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -402,7 +402,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -528,7 +528,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -702,7 +702,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -877,7 +877,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -1034,7 +1034,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
}, nil
@@ -1121,7 +1121,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -1208,7 +1208,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -1303,7 +1303,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 2},
@@ -1426,7 +1426,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 2},
@@ -1533,7 +1533,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -1630,7 +1630,7 @@ func TestReconcileKind(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return nil, io.EOF
}),
},
@@ -1723,6 +1723,10 @@ func TestReconcileKind(t *testing.T) {
}
+func DispatcherLabel() PodOption {
+ return PodLabel("app.kubernetes.io/kind", "kafka-dispatcher")
+}
+
func TestReconcileKindNoAutoscaler(t *testing.T) {
tt := TableTest{
@@ -1758,7 +1762,7 @@ func TestReconcileKindNoAutoscaler(t *testing.T) {
},
Key: ConsumerGroupTestKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return []eventingduckv1alpha1.Placement{
{PodName: "p1", VReplicas: 1},
{PodName: "p2", VReplicas: 1},
@@ -1922,7 +1926,7 @@ func TestFinalizeKind(t *testing.T) {
},
Key: testKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return nil, nil
}),
},
@@ -1991,7 +1995,7 @@ func TestFinalizeKind(t *testing.T) {
},
Key: testKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return nil, nil
}),
},
@@ -2117,7 +2121,7 @@ func TestFinalizeKind(t *testing.T) {
},
Key: testKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return nil, nil
}),
},
@@ -2163,7 +2167,7 @@ func TestFinalizeKind(t *testing.T) {
},
Key: testKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return nil, nil
}),
kafkatesting.ErrorOnDeleteConsumerGroupTestKey: sarama.ErrUnknownTopicOrPartition,
@@ -2210,7 +2214,7 @@ func TestFinalizeKind(t *testing.T) {
},
Key: testKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return nil, nil
}),
kafkatesting.ErrorOnDeleteConsumerGroupTestKey: sarama.ErrGroupIDNotFound,
@@ -2258,7 +2262,7 @@ func TestFinalizeKind(t *testing.T) {
WantErr: true,
Key: testKey,
OtherTestData: map[string]interface{}{
- testSchedulerKey: SchedulerFunc(func(vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
+ testSchedulerKey: SchedulerFunc(func(_ context.Context, vpod scheduler.VPod) ([]eventingduckv1alpha1.Placement, error) {
return nil, nil
}),
kafkatesting.ErrorOnDeleteConsumerGroupTestKey: sarama.ErrClusterAuthorizationFailed,
diff --git a/control-plane/pkg/reconciler/consumergroup/controller.go b/control-plane/pkg/reconciler/consumergroup/controller.go
index 6e99d22763..8b653cae7e 100644
--- a/control-plane/pkg/reconciler/consumergroup/controller.go
+++ b/control-plane/pkg/reconciler/consumergroup/controller.go
@@ -123,8 +123,6 @@ func NewController(ctx context.Context, watcher configmap.Watcher) *controller.I
KafkaChannelScheduler: createKafkaScheduler(ctx, c, kafkainternals.ChannelStatefulSetName, dispatcherPodInformer),
}
- clientPool := clientpool.Get(ctx)
-
r := &Reconciler{
SchedulerFunc: func(s string) (Scheduler, bool) { sched, ok := schedulers[strings.ToLower(s)]; return sched, ok },
ConsumerLister: consumer.Get(ctx).Lister(),
@@ -134,10 +132,8 @@ func NewController(ctx context.Context, watcher configmap.Watcher) *controller.I
PodLister: dispatcherPodInformer.Lister(),
KubeClient: kubeclient.Get(ctx),
NameGenerator: names.SimpleNameGenerator,
- GetKafkaClient: clientPool.GetClient,
InitOffsetsFunc: offset.InitOffsets,
SystemNamespace: system.Namespace(),
- GetKafkaClusterAdmin: clientPool.GetClusterAdmin,
KafkaFeatureFlags: config.DefaultFeaturesConfig(),
KedaClient: kedaclient.Get(ctx),
AutoscalerConfig: env.AutoscalerConfigMap,
@@ -145,6 +141,15 @@ func NewController(ctx context.Context, watcher configmap.Watcher) *controller.I
InitOffsetLatestInitialOffsetCache: prober.NewLocalExpiringCache[string, prober.Status, struct{}](ctx, 20*time.Minute),
}
+ clientPool := clientpool.Get(ctx)
+ if clientPool == nil {
+ r.GetKafkaClusterAdmin = clientpool.DisabledGetKafkaClusterAdminFunc
+ r.GetKafkaClient = clientpool.DisabledGetClient
+ } else {
+ r.GetKafkaClusterAdmin = clientPool.GetClusterAdmin
+ r.GetKafkaClient = clientPool.GetClient
+ }
+
consumerInformer := consumer.Get(ctx)
consumerGroupInformer := consumergroup.Get(ctx)
diff --git a/control-plane/pkg/reconciler/sink/controller.go b/control-plane/pkg/reconciler/sink/controller.go
index b18f5ba9aa..0e41d5ebbc 100644
--- a/control-plane/pkg/reconciler/sink/controller.go
+++ b/control-plane/pkg/reconciler/sink/controller.go
@@ -22,6 +22,8 @@ import (
"net"
"net/http"
+ "knative.dev/eventing/pkg/auth"
+
eventpolicyinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy"
"go.uber.org/zap"
@@ -57,8 +59,6 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
configmapInformer := configmapinformer.Get(ctx)
eventPolicyInformer := eventpolicyinformer.Get(ctx)
- clientPool := clientpool.Get(ctx)
-
reconciler := &Reconciler{
Reconciler: &base.Reconciler{
KubeClient: kubeclient.Get(ctx),
@@ -70,10 +70,16 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
DataPlaneNamespace: configs.SystemNamespace,
ReceiverLabel: base.SinkReceiverLabel,
},
- ConfigMapLister: configmapInformer.Lister(),
- EventPolicyLister: eventPolicyInformer.Lister(),
- GetKafkaClusterAdmin: clientPool.GetClusterAdmin,
- Env: configs,
+ ConfigMapLister: configmapInformer.Lister(),
+ EventPolicyLister: eventPolicyInformer.Lister(),
+ Env: configs,
+ }
+
+ clientPool := clientpool.Get(ctx)
+ if clientPool == nil {
+ reconciler.GetKafkaClusterAdmin = clientpool.DisabledGetKafkaClusterAdminFunc
+ } else {
+ reconciler.GetKafkaClusterAdmin = clientPool.GetClusterAdmin
}
_, err := reconciler.GetOrCreateDataPlaneConfigMap(ctx)
@@ -161,5 +167,11 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
DeleteFunc: reconciler.OnDeleteObserver,
})
+ sinkGK := eventing.SchemeGroupVersion.WithKind("KafkaSink").GroupKind()
+
+ // Enqueue the KafkaSink if we have an EventPolicy that was referencing it
+ // or got updated and now references the KafkaSink
+ eventPolicyInformer.Informer().AddEventHandler(auth.EventPolicyEventHandler(sinkInformer.Informer().GetIndexer(), sinkGK, impl.EnqueueKey))
+
return impl
}
diff --git a/control-plane/pkg/reconciler/sink/kafka_sink.go b/control-plane/pkg/reconciler/sink/kafka_sink.go
index 37d1b9dc25..b7513854b8 100644
--- a/control-plane/pkg/reconciler/sink/kafka_sink.go
+++ b/control-plane/pkg/reconciler/sink/kafka_sink.go
@@ -21,7 +21,7 @@ import (
"fmt"
"time"
- eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
+ "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
corev1 "k8s.io/api/core/v1"
eventingv1alpha1listers "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
@@ -100,8 +100,6 @@ func (r *Reconciler) reconcileKind(ctx context.Context, ks *eventing.KafkaSink)
Recorder: controller.GetEventRecorder(ctx),
}
- r.markEventPolicyConditionNotYetSupported(ks)
-
if !r.IsReceiverRunning() {
return statusConditionManager.DataPlaneNotAvailable()
}
@@ -170,6 +168,11 @@ func (r *Reconciler) reconcileKind(ctx context.Context, ks *eventing.KafkaSink)
logger.Debug("Got contract config map")
+ applyingEventPolicies, err := auth.GetEventPoliciesForResource(r.EventPolicyLister, eventing.SchemeGroupVersion.WithKind("KafkaSink"), ks.ObjectMeta)
+ if err != nil {
+ return fmt.Errorf("could not get applying eventpolicies for kafkasink: %w", err)
+ }
+
// Get contract data.
ct, err := r.GetDataPlaneConfigMapData(logger, contractConfigMap)
if err != nil && ct == nil {
@@ -198,10 +201,7 @@ func (r *Reconciler) reconcileKind(ctx context.Context, ks *eventing.KafkaSink)
}
// Get sink configuration.
- sinkConfig, err := r.getSinkContractResource(ctx, ks, secret, audience, ks.Status.AppliedEventPoliciesStatus)
- if err != nil {
- return statusConditionManager.FailedToResolveConfig(err)
- }
+ sinkConfig := r.getSinkContractResource(ctx, ks, secret, audience, applyingEventPolicies)
statusConditionManager.ConfigResolved()
sinkIndex := coreconfig.FindResource(ct, ks.UID)
@@ -234,6 +234,11 @@ func (r *Reconciler) reconcileKind(ctx context.Context, ks *eventing.KafkaSink)
logger.Debug("Updated receiver pod annotation")
+ err = auth.UpdateStatusWithProvidedEventPolicies(features, &ks.Status.AppliedEventPoliciesStatus, &ks.Status, applyingEventPolicies)
+ if err != nil {
+ return fmt.Errorf("could not update KafkaSink's status with EventPolicies: %w", err)
+ }
+
var addressableStatus duckv1.AddressStatus
if features.IsPermissiveTransportEncryption() {
caCerts, err := r.getCaCerts()
@@ -294,14 +299,6 @@ func (r *Reconciler) reconcileKind(ctx context.Context, ks *eventing.KafkaSink)
return nil
}
-func (r *Reconciler) markEventPolicyConditionNotYetSupported(ks *eventing.KafkaSink) {
- ks.Status.GetConditionSet().Manage(ks.GetStatus()).MarkTrueWithReason(
- base.ConditionEventPoliciesReady,
- "AuthzNotSupported",
- "Authorization not yet supported",
- )
-}
-
func (r *Reconciler) FinalizeKind(ctx context.Context, ks *eventing.KafkaSink) reconciler.Event {
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
return r.finalizeKind(ctx, ks)
@@ -431,14 +428,15 @@ func (r *Reconciler) setTrustBundles(ct *contract.Contract) error {
return nil
}
-func (r *Reconciler) getSinkContractResource(ctx context.Context, kafkaSink *eventingv1alpha1.KafkaSink, secret *corev1.Secret, audience *string, appliedEventPoliciesStatus eventingduck.AppliedEventPoliciesStatus) (*contract.Resource, error) {
+func (r *Reconciler) getSinkContractResource(ctx context.Context, kafkaSink *eventingv1alpha1.KafkaSink, secret *corev1.Secret, audience *string, applyingEventPolicies []*v1alpha1.EventPolicy) *contract.Resource {
features := feature.FromContext(ctx)
sinkConfig := &contract.Resource{
Uid: string(kafkaSink.UID),
Topics: []string{kafkaSink.Spec.Topic},
Ingress: &contract.Ingress{
- Path: receiver.PathFromObject(kafkaSink),
- ContentMode: coreconfig.ContentModeFromString(*kafkaSink.Spec.ContentMode),
+ Path: receiver.PathFromObject(kafkaSink),
+ ContentMode: coreconfig.ContentModeFromString(*kafkaSink.Spec.ContentMode),
+ EventPolicies: coreconfig.ContractEventPoliciesFromEventPolicies(applyingEventPolicies, kafkaSink.Namespace, features),
},
FeatureFlags: &contract.FeatureFlags{
EnableEventTypeAutocreate: features.IsEnabled(feature.EvenTypeAutoCreate),
@@ -467,11 +465,5 @@ func (r *Reconciler) getSinkContractResource(ctx context.Context, kafkaSink *eve
sinkConfig.Ingress.Audience = *audience
}
- eventPolicies, err := coreconfig.EventPoliciesFromAppliedEventPoliciesStatus(appliedEventPoliciesStatus, r.EventPolicyLister, kafkaSink.Namespace, features)
- if err != nil {
- return nil, fmt.Errorf("could not get eventpolicies from kafkasink status: %w", err)
- }
- sinkConfig.Ingress.EventPolicies = eventPolicies
-
- return sinkConfig, nil
+ return sinkConfig
}
diff --git a/control-plane/pkg/reconciler/sink/kafka_sink_test.go b/control-plane/pkg/reconciler/sink/kafka_sink_test.go
index 11b95e659d..3e9e5a14ef 100644
--- a/control-plane/pkg/reconciler/sink/kafka_sink_test.go
+++ b/control-plane/pkg/reconciler/sink/kafka_sink_test.go
@@ -22,6 +22,8 @@ import (
"io"
"testing"
+ reconcilertesting "knative.dev/eventing/pkg/reconciler/testing/v1"
+
"knative.dev/eventing/pkg/auth"
"k8s.io/utils/pointer"
@@ -81,6 +83,9 @@ const (
testProber = "testProber"
TopicPrefix = "knative-sink-"
+
+ readyEventPolicyName = "test-event-policy-ready"
+ unreadyEventPolicyName = "test-event-policy-unready"
)
var (
@@ -103,6 +108,12 @@ var (
Namespace: SinkNamespace,
})
+ sinkGVK = metav1.GroupVersionKind{
+ Group: "eventing.knative.dev",
+ Version: "v1alpha1",
+ Kind: "KafkaSink",
+ }
+
errCreateTopic = fmt.Errorf("failed to create topic")
errDeleteTopic = fmt.Errorf("failed to delete topic")
@@ -195,7 +206,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -262,7 +273,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -339,7 +350,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -418,7 +429,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -466,7 +477,6 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
InitSinkConditions,
StatusDataPlaneAvailable,
StatusTopicNotPresentErr(SinkTopic(), io.EOF),
- WithSinkEventPolicyConditionAuthZNotSupported(),
),
},
},
@@ -545,7 +555,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -584,7 +594,6 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
StatusDataPlaneAvailable,
BootstrapServers(bootstrapServersArr),
StatusFailedToCreateTopic(SinkTopic()),
- WithSinkEventPolicyConditionAuthZNotSupported(),
),
},
},
@@ -660,7 +669,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -723,7 +732,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -809,7 +818,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -895,7 +904,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -923,7 +932,6 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
Object: NewSink(
InitSinkConditions,
StatusDataPlaneNotAvailable,
- WithSinkEventPolicyConditionAuthZNotSupported(),
),
},
},
@@ -991,7 +999,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1064,7 +1072,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1117,7 +1125,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
StatusConfigMapUpdatedReady(&env),
StatusTopicReadyWithOwner(SinkTopic(), sink.ControllerTopicOwner),
StatusProbeFailed(prober.StatusNotReady),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1173,7 +1181,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
StatusConfigMapUpdatedReady(&env),
StatusTopicReadyWithOwner(SinkTopic(), sink.ControllerTopicOwner),
StatusProbeFailed(prober.StatusUnknown),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1250,7 +1258,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
@@ -1321,14 +1329,89 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesReadyBecauseOIDCDisabled(),
),
},
},
}, {
Name: "Reconciled normal - OIDC enabled - should provision audience",
Ctx: feature.ToContext(context.Background(), feature.Flags{
- feature.OIDCAuthentication: feature.Enabled,
+ feature.OIDCAuthentication: feature.Enabled,
+ feature.AuthorizationDefaultMode: feature.AuthorizationDenyAll,
+ }),
+ Objects: []runtime.Object{
+ NewSink(
+ StatusControllerOwnsTopic(sink.ControllerTopicOwner),
+ ),
+ NewConfigMapWithBinaryData(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, nil),
+ SinkReceiverPod(env.SystemNamespace, map[string]string{
+ "annotation_to_preserve": "value_to_preserve",
+ }),
+ },
+ Key: testKey,
+ WantEvents: []string{
+ finalizerUpdatedEvent,
+ },
+ WantUpdates: []clientgotesting.UpdateActionImpl{
+ ConfigMapUpdate(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, env.ContractConfigMapFormat, &contract.Contract{
+ Resources: []*contract.Resource{
+ {
+ Uid: SinkUUID,
+ Topics: []string{SinkTopic()},
+ Ingress: &contract.Ingress{
+ ContentMode: contract.ContentMode_BINARY,
+ Path: receiver.Path(SinkNamespace, SinkName),
+ Audience: sinkAudience,
+ },
+ BootstrapServers: bootstrapServers,
+ Reference: SinkReference(),
+ FeatureFlags: FeatureFlagsETAutocreate(false),
+ },
+ },
+ Generation: 1,
+ }),
+ SinkReceiverPodUpdate(env.SystemNamespace, map[string]string{
+ base.VolumeGenerationAnnotationKey: "1",
+ "annotation_to_preserve": "value_to_preserve",
+ }),
+ },
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchFinalizers(),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{
+ {
+ Object: NewSink(
+ StatusControllerOwnsTopic(sink.ControllerTopicOwner),
+ InitSinkConditions,
+ StatusDataPlaneAvailable,
+ StatusConfigParsed,
+ BootstrapServers(bootstrapServersArr),
+ StatusConfigMapUpdatedReady(&env),
+ StatusTopicReadyWithOwner(SinkTopic(), sink.ControllerTopicOwner),
+ SinkAddressable(&env),
+ StatusProbeSucceeded,
+ WithSinkAddress(duckv1.Addressable{
+ Name: pointer.String("http"),
+ URL: sinkAddress,
+ Audience: &sinkAudience,
+ }),
+ WithSinkAddresses([]duckv1.Addressable{
+ {
+ Name: pointer.String("http"),
+ URL: sinkAddress,
+ Audience: &sinkAudience,
+ },
+ }),
+ WithSinkAddessable(),
+ WithSinkEventPoliciesReadyBecauseNoPolicyAndOIDCEnabled(feature.AuthorizationDenyAll),
+ ),
+ },
+ },
+ }, {
+ Name: "Should list applying EventPolicies",
+ Ctx: feature.ToContext(context.Background(), feature.Flags{
+ feature.OIDCAuthentication: feature.Enabled,
+ feature.AuthorizationDefaultMode: feature.AuthorizationAllowSameNamespace,
}),
Objects: []runtime.Object{
NewSink(
@@ -1338,6 +1421,108 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
SinkReceiverPod(env.SystemNamespace, map[string]string{
"annotation_to_preserve": "value_to_preserve",
}),
+ reconcilertesting.NewEventPolicy(readyEventPolicyName, SinkNamespace,
+ reconcilertesting.WithReadyEventPolicyCondition,
+ reconcilertesting.WithEventPolicyToRef(sinkGVK, SinkName),
+ reconcilertesting.WithEventPolicyStatusFromSub([]string{
+ "sub",
+ }),
+ ),
+ },
+ Key: testKey,
+ WantEvents: []string{
+ finalizerUpdatedEvent,
+ },
+ WantUpdates: []clientgotesting.UpdateActionImpl{
+ ConfigMapUpdate(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, env.ContractConfigMapFormat, &contract.Contract{
+ Resources: []*contract.Resource{
+ {
+ Uid: SinkUUID,
+ Topics: []string{SinkTopic()},
+ Ingress: &contract.Ingress{
+ ContentMode: contract.ContentMode_BINARY,
+ Path: receiver.Path(SinkNamespace, SinkName),
+ Audience: sinkAudience,
+ EventPolicies: []*contract.EventPolicy{
+ {
+ TokenMatchers: []*contract.TokenMatcher{
+ {
+ Matcher: &contract.TokenMatcher_Exact{
+ Exact: &contract.Exact{
+ Attributes: map[string]string{
+ "sub": "sub",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ BootstrapServers: bootstrapServers,
+ Reference: SinkReference(),
+ FeatureFlags: FeatureFlagsETAutocreate(false),
+ },
+ },
+ Generation: 1,
+ }),
+ SinkReceiverPodUpdate(env.SystemNamespace, map[string]string{
+ base.VolumeGenerationAnnotationKey: "1",
+ "annotation_to_preserve": "value_to_preserve",
+ }),
+ },
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchFinalizers(),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{
+ {
+ Object: NewSink(
+ StatusControllerOwnsTopic(sink.ControllerTopicOwner),
+ InitSinkConditions,
+ StatusDataPlaneAvailable,
+ StatusConfigParsed,
+ BootstrapServers(bootstrapServersArr),
+ StatusConfigMapUpdatedReady(&env),
+ StatusTopicReadyWithOwner(SinkTopic(), sink.ControllerTopicOwner),
+ SinkAddressable(&env),
+ StatusProbeSucceeded,
+ WithSinkAddress(duckv1.Addressable{
+ Name: pointer.String("http"),
+ URL: sinkAddress,
+ Audience: &sinkAudience,
+ }),
+ WithSinkAddresses([]duckv1.Addressable{
+ {
+ Name: pointer.String("http"),
+ URL: sinkAddress,
+ Audience: &sinkAudience,
+ },
+ }),
+ WithSinkAddessable(),
+ WithSinkEventPoliciesReady(),
+ WithSinkEventPoliciesListed(readyEventPolicyName),
+ ),
+ },
+ },
+ }, {
+ Name: "Should mark as NotReady on unready EventPolicies",
+ Ctx: feature.ToContext(context.Background(), feature.Flags{
+ feature.OIDCAuthentication: feature.Enabled,
+ feature.AuthorizationDefaultMode: feature.AuthorizationAllowSameNamespace,
+ }),
+ Objects: []runtime.Object{
+ NewSink(
+ StatusControllerOwnsTopic(sink.ControllerTopicOwner),
+ ),
+ NewConfigMapWithBinaryData(env.DataPlaneConfigMapNamespace, env.ContractConfigMapName, nil),
+ SinkReceiverPod(env.SystemNamespace, map[string]string{
+ "annotation_to_preserve": "value_to_preserve",
+ }),
+ reconcilertesting.NewEventPolicy(unreadyEventPolicyName, SinkNamespace,
+ reconcilertesting.WithUnreadyEventPolicyCondition("", ""),
+ reconcilertesting.WithEventPolicyToRef(sinkGVK, SinkName),
+ reconcilertesting.WithEventPolicyStatusFromSub([]string{"sub"}),
+ ),
},
Key: testKey,
WantEvents: []string{
@@ -1353,6 +1538,21 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
ContentMode: contract.ContentMode_BINARY,
Path: receiver.Path(SinkNamespace, SinkName),
Audience: sinkAudience,
+ EventPolicies: []*contract.EventPolicy{
+ {
+ TokenMatchers: []*contract.TokenMatcher{
+ {
+ Matcher: &contract.TokenMatcher_Prefix{
+ Prefix: &contract.Prefix{
+ Attributes: map[string]string{
+ "sub": "system:serviceaccount:" + SinkNamespace + ":",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
},
BootstrapServers: bootstrapServers,
Reference: SinkReference(),
@@ -1394,7 +1594,7 @@ func sinkReconciliation(t *testing.T, format string, env config.Env) {
},
}),
WithSinkAddessable(),
- WithSinkEventPolicyConditionAuthZNotSupported(),
+ WithSinkEventPoliciesNotReady("EventPoliciesNotReady", fmt.Sprintf("event policies %s are not ready", unreadyEventPolicyName)),
),
},
},
diff --git a/control-plane/pkg/reconciler/testing/objects_broker.go b/control-plane/pkg/reconciler/testing/objects_broker.go
index e07e79ce9d..40d85c4903 100644
--- a/control-plane/pkg/reconciler/testing/objects_broker.go
+++ b/control-plane/pkg/reconciler/testing/objects_broker.go
@@ -424,7 +424,8 @@ func BrokerDispatcherPod(namespace string, annotations map[string]string) runtim
Namespace: namespace,
Annotations: annotations,
Labels: map[string]string{
- "app": base.BrokerDispatcherLabel,
+ "app": base.BrokerDispatcherLabel,
+ "app.kubernetes.io/kind": "kafka-dispatcher",
},
},
Status: corev1.PodStatus{
diff --git a/control-plane/pkg/reconciler/testing/objects_channel.go b/control-plane/pkg/reconciler/testing/objects_channel.go
index 76d3c5850c..60ea5b17bb 100644
--- a/control-plane/pkg/reconciler/testing/objects_channel.go
+++ b/control-plane/pkg/reconciler/testing/objects_channel.go
@@ -43,6 +43,8 @@ import (
"knative.dev/eventing-kafka-broker/control-plane/pkg/contract"
"knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/base"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ "knative.dev/eventing/pkg/apis/feature"
subscriptionv1 "knative.dev/eventing/pkg/reconciler/testing/v1"
)
@@ -191,7 +193,8 @@ func ChannelDispatcherPod(namespace string, annotations map[string]string) runti
Namespace: namespace,
Annotations: annotations,
Labels: map[string]string{
- "app": base.ChannelDispatcherLabel,
+ "app": base.ChannelDispatcherLabel,
+ "app.kubernetes.io/kind": "kafka-dispatcher",
},
},
Status: corev1.PodStatus{
@@ -446,3 +449,43 @@ func ChannelAddress() *apis.URL {
Host: fmt.Sprintf("%s.%s.svc.%s", ChannelServiceName, ChannelNamespace, network.GetClusterDomainName()),
}
}
+
+func WithChannelEventPoliciesReady() KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*messagingv1beta1.KafkaChannel)
+ ks.Status.MarkEventPoliciesTrue()
+ }
+}
+
+func WithChannelEventPoliciesNotReady(reason, message string) KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*messagingv1beta1.KafkaChannel)
+ ks.Status.MarkEventPoliciesFailed(reason, message)
+ }
+}
+
+func WithChannelEventPoliciesListed(policyNames ...string) KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*messagingv1beta1.KafkaChannel)
+ for _, name := range policyNames {
+ ks.Status.Policies = append(ks.Status.Policies, eventingduckv1.AppliedEventPolicyRef{
+ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
+ Name: name,
+ })
+ }
+ }
+}
+
+func WithChannelEventPoliciesReadyBecauseOIDCDisabled() KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*messagingv1beta1.KafkaChannel)
+ ks.Status.MarkEventPoliciesTrueWithReason("OIDCDisabled", "Feature %q must be enabled to support Authorization", feature.OIDCAuthentication)
+ }
+}
+
+func WithChannelEventPoliciesReadyBecauseNoPolicyAndOIDCEnabled(authzMode feature.Flag) KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*messagingv1beta1.KafkaChannel)
+ ks.Status.MarkEventPoliciesTrueWithReason("DefaultAuthorizationMode", "Default authz mode is %q", authzMode)
+ }
+}
diff --git a/control-plane/pkg/reconciler/testing/objects_common.go b/control-plane/pkg/reconciler/testing/objects_common.go
index f3d8a939cf..7fa9612fdb 100644
--- a/control-plane/pkg/reconciler/testing/objects_common.go
+++ b/control-plane/pkg/reconciler/testing/objects_common.go
@@ -616,12 +616,12 @@ func NewDispatcherPod(name string, options ...PodOption) *corev1.Pod {
return p
}
-func PodLabel(value string) PodOption {
+func PodLabel(key, value string) PodOption {
return func(pod *corev1.Pod) {
if pod.Labels == nil {
pod.Labels = make(map[string]string, 2)
}
- pod.Labels["app"] = value
+ pod.Labels[key] = value
}
}
diff --git a/control-plane/pkg/reconciler/testing/objects_consumergroup.go b/control-plane/pkg/reconciler/testing/objects_consumergroup.go
index bdc0f83f4d..5741cde578 100644
--- a/control-plane/pkg/reconciler/testing/objects_consumergroup.go
+++ b/control-plane/pkg/reconciler/testing/objects_consumergroup.go
@@ -248,3 +248,9 @@ func WithConfigmapOwnerRef(ownerref *metav1.OwnerReference) reconcilertesting.Co
cg.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*ownerref}
}
}
+
+func WithTopLevelResourceRef(ref *corev1.ObjectReference) ConsumerGroupOption {
+ return func(cg *kafkainternals.ConsumerGroup) {
+ cg.Spec.TopLevelResourceRef = ref
+ }
+}
diff --git a/control-plane/pkg/reconciler/testing/objects_sink.go b/control-plane/pkg/reconciler/testing/objects_sink.go
index c015626700..a3dfa92e3a 100644
--- a/control-plane/pkg/reconciler/testing/objects_sink.go
+++ b/control-plane/pkg/reconciler/testing/objects_sink.go
@@ -20,6 +20,10 @@ import (
"context"
"fmt"
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ "knative.dev/eventing/pkg/apis/feature"
+
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -204,13 +208,42 @@ func WithSinkAddessable() KRShapedOption {
ch.GetConditionSet().Manage(ch.GetStatus()).MarkTrue(base.ConditionAddressable)
}
}
-func WithSinkEventPolicyConditionAuthZNotSupported() KRShapedOption {
+func WithSinkEventPoliciesReady() KRShapedOption {
return func(obj duckv1.KRShaped) {
- ch := obj.(*eventing.KafkaSink)
- ch.GetConditionSet().Manage(ch.GetStatus()).MarkTrueWithReason(
- base.ConditionEventPoliciesReady,
- "AuthzNotSupported",
- "Authorization not yet supported",
- )
+ ks := obj.(*eventing.KafkaSink)
+ ks.Status.MarkEventPoliciesTrue()
+ }
+}
+
+func WithSinkEventPoliciesNotReady(reason, message string) KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*eventing.KafkaSink)
+ ks.Status.MarkEventPoliciesFailed(reason, message)
+ }
+}
+
+func WithSinkEventPoliciesListed(policyNames ...string) KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*eventing.KafkaSink)
+ for _, name := range policyNames {
+ ks.Status.Policies = append(ks.Status.Policies, eventingduckv1.AppliedEventPolicyRef{
+ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
+ Name: name,
+ })
+ }
+ }
+}
+
+func WithSinkEventPoliciesReadyBecauseOIDCDisabled() KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*eventing.KafkaSink)
+ ks.Status.MarkEventPoliciesTrueWithReason("OIDCDisabled", "Feature %q must be enabled to support Authorization", feature.OIDCAuthentication)
+ }
+}
+
+func WithSinkEventPoliciesReadyBecauseNoPolicyAndOIDCEnabled(authzMode feature.Flag) KRShapedOption {
+ return func(obj duckv1.KRShaped) {
+ ks := obj.(*eventing.KafkaSink)
+ ks.Status.MarkEventPoliciesTrueWithReason("DefaultAuthorizationMode", "Default authz mode is %q", authzMode)
}
}
diff --git a/control-plane/pkg/reconciler/testing/objects_source.go b/control-plane/pkg/reconciler/testing/objects_source.go
index af11432cf7..58b3bb8c8d 100644
--- a/control-plane/pkg/reconciler/testing/objects_source.go
+++ b/control-plane/pkg/reconciler/testing/objects_source.go
@@ -23,14 +23,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
- "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1"
- sources "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1"
eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
"knative.dev/eventing/pkg/apis/feature"
"knative.dev/eventing/pkg/eventingtls/eventingtlstesting"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1"
+ sources "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1"
+
"knative.dev/eventing-kafka-broker/control-plane/pkg/contract"
"knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/base"
)
@@ -215,7 +216,8 @@ func SourceDispatcherPod(namespace string, annotations map[string]string) runtim
Namespace: namespace,
Annotations: annotations,
Labels: map[string]string{
- "app": base.SourceDispatcherLabel,
+ "app": base.SourceDispatcherLabel,
+ "app.kubernetes.io/kind": "kafka-dispatcher",
},
},
Status: corev1.PodStatus{
diff --git a/control-plane/pkg/reconciler/trigger/controller.go b/control-plane/pkg/reconciler/trigger/controller.go
index 41138ca061..1d576ab94c 100644
--- a/control-plane/pkg/reconciler/trigger/controller.go
+++ b/control-plane/pkg/reconciler/trigger/controller.go
@@ -72,8 +72,6 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
triggerLister := triggerInformer.Lister()
oidcServiceaccountInformer := serviceaccountinformer.Get(ctx, auth.OIDCLabelSelector)
- clientPool := clientpool.Get(ctx)
-
reconciler := &Reconciler{
Reconciler: &base.Reconciler{
KubeClient: kubeclient.Get(ctx),
@@ -97,12 +95,19 @@ func NewController(ctx context.Context, watcher configmap.Watcher, configs *conf
BrokerClass: kafka.BrokerClass,
DataPlaneConfigMapLabeler: base.NoopConfigmapOption,
KafkaFeatureFlags: apisconfig.DefaultFeaturesConfig(),
- GetKafkaClient: clientPool.GetClient,
- GetKafkaClusterAdmin: clientPool.GetClusterAdmin,
InitOffsetsFunc: offset.InitOffsets,
ServiceAccountLister: oidcServiceaccountInformer.Lister(),
}
+ clientPool := clientpool.Get(ctx)
+ if clientPool == nil {
+ reconciler.GetKafkaClusterAdmin = clientpool.DisabledGetKafkaClusterAdminFunc
+ reconciler.GetKafkaClient = clientpool.DisabledGetClient
+ } else {
+ reconciler.GetKafkaClusterAdmin = clientPool.GetClusterAdmin
+ reconciler.GetKafkaClient = clientPool.GetClient
+ }
+
impl := triggerreconciler.NewImpl(ctx, reconciler, func(impl *controller.Impl) controller.Options {
return controller.Options{
FinalizerName: FinalizerName,
diff --git a/control-plane/pkg/reconciler/trigger/namespaced_controller.go b/control-plane/pkg/reconciler/trigger/namespaced_controller.go
index e021784959..e1ae23e98a 100644
--- a/control-plane/pkg/reconciler/trigger/namespaced_controller.go
+++ b/control-plane/pkg/reconciler/trigger/namespaced_controller.go
@@ -64,8 +64,6 @@ func NewNamespacedController(ctx context.Context, watcher configmap.Watcher, con
triggerLister := triggerInformer.Lister()
oidcServiceaccountInformer := serviceaccountinformer.Get(ctx, auth.OIDCLabelSelector)
- clientPool := clientpool.Get(ctx)
-
reconciler := &NamespacedReconciler{
Reconciler: &base.Reconciler{
KubeClient: kubeclient.Get(ctx),
@@ -87,12 +85,19 @@ func NewNamespacedController(ctx context.Context, watcher configmap.Watcher, con
ServiceAccountLister: oidcServiceaccountInformer.Lister(),
EventingClient: eventingclient.Get(ctx),
Env: configs,
- GetKafkaClient: clientPool.GetClient,
- GetKafkaClusterAdmin: clientPool.GetClusterAdmin,
InitOffsetsFunc: offset.InitOffsets,
KafkaFeatureFlags: apisconfig.DefaultFeaturesConfig(),
}
+ clientPool := clientpool.Get(ctx)
+ if clientPool == nil {
+ reconciler.GetKafkaClusterAdmin = clientpool.DisabledGetKafkaClusterAdminFunc
+ reconciler.GetKafkaClient = clientpool.DisabledGetClient
+ } else {
+ reconciler.GetKafkaClusterAdmin = clientPool.GetClusterAdmin
+ reconciler.GetKafkaClient = clientPool.GetClient
+ }
+
impl := triggerreconciler.NewImpl(ctx, reconciler, func(impl *controller.Impl) controller.Options {
return controller.Options{
FinalizerName: NamespacedFinalizerName,
diff --git a/control-plane/pkg/reconciler/trigger/v2/triggerv2.go b/control-plane/pkg/reconciler/trigger/v2/triggerv2.go
index 34ad7b0fba..8bdc0af7f4 100644
--- a/control-plane/pkg/reconciler/trigger/v2/triggerv2.go
+++ b/control-plane/pkg/reconciler/trigger/v2/triggerv2.go
@@ -209,6 +209,13 @@ func (r *Reconciler) reconcileConsumerGroup(ctx context.Context, broker *eventin
},
},
Spec: internalscg.ConsumerGroupSpec{
+ TopLevelResourceRef: &corev1.ObjectReference{
+ APIVersion: eventing.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: broker.Name,
+ Namespace: broker.Namespace,
+ UID: broker.UID,
+ },
Template: internalscg.ConsumerTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
diff --git a/control-plane/pkg/reconciler/trigger/v2/triggerv2_test.go b/control-plane/pkg/reconciler/trigger/v2/triggerv2_test.go
index 510e5b90a0..62ed966870 100644
--- a/control-plane/pkg/reconciler/trigger/v2/triggerv2_test.go
+++ b/control-plane/pkg/reconciler/trigger/v2/triggerv2_test.go
@@ -126,6 +126,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerFilters(NewConsumerSpecFilters()),
ConsumerReply(ConsumerTopicReply()),
)),
+ withBrokerTopLevelResourceRef(),
),
},
WantEvents: []string{
@@ -187,6 +188,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerFilters(NewConsumerSpecFilters()),
ConsumerReply(ConsumerTopicReply()),
)),
+ withBrokerTopLevelResourceRef(),
),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{
@@ -241,6 +243,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerFilters(NewConsumerSpecFilters()),
ConsumerReply(ConsumerTopicReply()),
)),
+ withBrokerTopLevelResourceRef(),
),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{
@@ -296,6 +299,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerFilters(NewConsumerSpecFilters()),
ConsumerReply(ConsumerTopicReply()),
)),
+ withBrokerTopLevelResourceRef(),
),
},
WantEvents: []string{
@@ -403,6 +407,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerReply(ConsumerTopicReply()),
)),
ConsumerGroupReady,
+ withBrokerTopLevelResourceRef(),
),
},
},
@@ -448,6 +453,7 @@ func TestReconcileKind(t *testing.T) {
WithConsumerGroupMetaLabels(OwnerAsTriggerLabel),
WithConsumerGroupLabels(ConsumerTriggerLabel),
ConsumerGroupReady,
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -472,6 +478,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerReply(ConsumerTopicReply()),
)),
ConsumerGroupReady,
+ withBrokerTopLevelResourceRef(),
),
},
},
@@ -515,6 +522,7 @@ func TestReconcileKind(t *testing.T) {
WithConsumerGroupOwnerRef(kmeta.NewControllerRef(newTrigger())),
WithConsumerGroupMetaLabels(OwnerAsTriggerLabel),
WithConsumerGroupLabels(ConsumerTriggerLabel),
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -537,6 +545,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerFilters(NewConsumerSpecFilters()),
ConsumerReply(ConsumerTopicReply()),
)),
+ withBrokerTopLevelResourceRef(),
),
},
},
@@ -597,6 +606,7 @@ func TestReconcileKind(t *testing.T) {
},
}),
)),
+ withBrokerTopLevelResourceRef(),
),
NewLegacySASLSecret(ConfigMapNamespace, "secret-1"),
},
@@ -627,6 +637,7 @@ func TestReconcileKind(t *testing.T) {
},
}),
)),
+ withBrokerTopLevelResourceRef(),
),
},
},
@@ -682,6 +693,7 @@ func TestReconcileKind(t *testing.T) {
)),
ConsumerGroupReady,
ConsumerGroupReplicas(1),
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -736,6 +748,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerReply(ConsumerTopicReply()),
)),
ConsumerGroupReplicas(1),
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -791,6 +804,7 @@ func TestReconcileKind(t *testing.T) {
)),
WithConsumerGroupFailed("failed", "failed"),
ConsumerGroupReplicas(1),
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -845,6 +859,7 @@ func TestReconcileKind(t *testing.T) {
)),
WithDeadLetterSinkURI(url.String()),
ConsumerGroupReplicas(1),
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -992,6 +1007,7 @@ func TestReconcileKind(t *testing.T) {
WithConsumerGroupMetaLabels(OwnerAsTriggerLabel),
WithConsumerGroupLabels(ConsumerTriggerLabel),
ConsumerGroupReady,
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -1015,6 +1031,7 @@ func TestReconcileKind(t *testing.T) {
ConsumerReply(ConsumerTopicReply()),
)),
ConsumerGroupReady,
+ withBrokerTopLevelResourceRef(),
),
},
},
@@ -1073,6 +1090,7 @@ func TestReconcileKind(t *testing.T) {
)),
ConsumerGroupReady,
ConsumerGroupReplicas(1),
+ withBrokerTopLevelResourceRef(),
),
},
Key: testKey,
@@ -1172,3 +1190,13 @@ func removeFinalizers() clientgotesting.PatchActionImpl {
action.Patch = []byte(patch)
return action
}
+
+func withBrokerTopLevelResourceRef() ConsumerGroupOption {
+ return WithTopLevelResourceRef(&corev1.ObjectReference{
+ APIVersion: eventing.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Namespace: BrokerNamespace,
+ Name: BrokerName,
+ UID: BrokerUUID,
+ })
+}
diff --git a/control-plane/pkg/security/secret.go b/control-plane/pkg/security/secret.go
index 23e4d9ca62..b70962d371 100644
--- a/control-plane/pkg/security/secret.go
+++ b/control-plane/pkg/security/secret.go
@@ -19,8 +19,10 @@ package security
import (
"crypto/tls"
"crypto/x509"
+ "encoding/pem"
"fmt"
"strconv"
+ "strings"
"github.com/IBM/sarama"
@@ -193,6 +195,10 @@ func sslConfig(protocol string, data map[string][]byte) kafka.ConfigOption {
if err != nil {
return fmt.Errorf("[protocol %s] failed to load x.509 key pair: %w", protocol, err)
}
+ // Java Kafka clients don't support PKCS #1 format for the private key
+ if isPrivateKeyPKCS1Format(userKeyCert) {
+ return fmt.Errorf("[protocol %s] unsupported user key format in %s, 'PKCS #1' format is not supported, convert private key to 'PKCS #8'", protocol, UserKey)
+ }
tlsCerts = []tls.Certificate{tlsCert}
}
}
@@ -220,3 +226,19 @@ func skipClientAuthCheck(data map[string][]byte) (bool, error) {
}
return enabled, nil
}
+
+func isPrivateKeyPKCS1Format(keyPEMBlock []byte) bool {
+ var keyDERBlock *pem.Block
+ for {
+ keyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock)
+ if keyDERBlock == nil {
+ return false
+ }
+ if keyDERBlock.Type == "PRIVATE KEY" || strings.HasSuffix(keyDERBlock.Type, " PRIVATE KEY") {
+ break
+ }
+ }
+
+ _, err := x509.ParsePKCS1PrivateKey(keyDERBlock.Bytes)
+ return err == nil
+}
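For reference, a PKCS #1 key that this check rejects can be re-encoded as PKCS #8 with the Go standard library alone; the following is a minimal sketch, where the user.key / user-pkcs8.key filenames are only illustrative placeholders:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"os"
)

func main() {
	// Read a PEM-encoded PKCS #1 RSA key ("RSA PRIVATE KEY").
	in, err := os.ReadFile("user.key")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(in)
	if block == nil || block.Type != "RSA PRIVATE KEY" {
		panic("expected a PEM-encoded PKCS #1 RSA private key")
	}
	rsaKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		panic(err)
	}
	// Re-encode the same key as PKCS #8 ("PRIVATE KEY"), the format the
	// Java Kafka clients accept.
	der, err := x509.MarshalPKCS8PrivateKey(rsaKey)
	if err != nil {
		panic(err)
	}
	out := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})
	if err := os.WriteFile("user-pkcs8.key", out, 0o600); err != nil {
		panic(err)
	}
}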
diff --git a/control-plane/pkg/security/secret_test.go b/control-plane/pkg/security/secret_test.go
index 601cafb996..7e27176cb6 100644
--- a/control-plane/pkg/security/secret_test.go
+++ b/control-plane/pkg/security/secret_test.go
@@ -194,6 +194,22 @@ func TestSSL(t *testing.T) {
assert.NotNil(t, config.Net.TLS.Config.RootCAs)
}
+func TestSSLPKCS1(t *testing.T) {
+ ca, userKey, userCert := loadPKCS1Certs(t)
+
+ secret := map[string][]byte{
+ "protocol": []byte("SSL"),
+ "user.key": userKey,
+ "user.crt": userCert,
+ "ca.crt": ca,
+ }
+ config := sarama.NewConfig()
+
+ err := kafka.Options(config, secretData(secret))
+
+ assert.NotNil(t, err)
+}
+
func TestSSLNoUserKey(t *testing.T) {
ca, _, userCert := loadCerts(t)
@@ -384,3 +400,16 @@ func loadCerts(t *testing.T) (ca, userKey, userCert []byte) {
return ca, userKey, userCert
}
+
+func loadPKCS1Certs(t *testing.T) (ca, userKey, userCert []byte) {
+ ca, err := os.ReadFile("testdata/ca.crt")
+ assert.Nil(t, err)
+
+ userKey, err = os.ReadFile("testdata/pkcs1_user.key")
+ assert.Nil(t, err)
+
+ userCert, err = os.ReadFile("testdata/user.crt")
+ assert.Nil(t, err)
+
+ return ca, userKey, userCert
+}
diff --git a/control-plane/pkg/security/testdata/pkcs1_user.key b/control-plane/pkg/security/testdata/pkcs1_user.key
new file mode 100644
index 0000000000..6b7a292fac
--- /dev/null
+++ b/control-plane/pkg/security/testdata/pkcs1_user.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA1c6XoILp78NVhSuzC8KJ1QnSGQ8CisCHkGWeEIkXI0Cuh2k6
+2+vpVIMUuHdehF45/Jxcia54GqIZ6SneN1IpW94+oP+Sls1ZtEZgJpJCyMqe8qBd
+jB2M/+CI3px8GyinnumM50TPr/zpp04XsR8I2NzhQq2IQtNAm7FjK7orZtfdqCYe
+sk5tARcObVWxKm+WOwjTWDGmlVyxwyWqFGIspV2ymv8Sx8rnOBhUFYIBqB3lefNn
+ja0Oh89oZM6ZlRrCupxsnZHXhM3i/c2+AR+tLWQEW0xlqdtle8oARe44E1bnryI1
+mDLmjQ3YuyR4/Kw+yl4Q+DLFMC7pBAZLTFbkIQIDAQABAoIBAE190DTr3e/5gyB+
+Iymq+5vMMGrGpuw1Na0fN3fUyB8NzXPkruGQkoP/8l2dXhNpt2iYH24DXyKACBYb
+B6BTVgwm89oUZ0Pi75VIQIcaUbxGu+9CMkWbXERNVC4i11Rcmswc5+XWadPmPaVW
+x315uxImlDo/fPiDapJDa6colZxzDRfa+cH2PrjzDw317Q371qEliJOJF2ZFPDRF
+vSBiSQlo9gE9vVR6lanuaG1nQFkAnN1wjb4qfjsGjSdSbQfOHkwA+jQ8o4L3csAo
+idUq8UlLmGJY7GIStF47m7TiWv/aLcTCOks7sii/gQJx9GEme+wR1Xe/BtHErqnI
+N3hUVKUCgYEA7PvaV6AJE/Ht9itRbxuoeK+yAe0Sn9z5mbuy9HQsVJH0LLL/lL5Y
+BuyWWy+Tj7IorfCEmHyY9KOq9V7HRRIa6PKzd5rVGn1D8I6yPU/n8FgZboOy4ahN
+lkbDNIuHcu27bkqiAZ4Vu2mpRtalr7QQdlS9v7S235GDQWTroDQQyycCgYEA5vaj
+mPnvYOR2S2h4pSBTKdz9Ba2fWw0MiLFIKoHmOblazqRZkLOKoN/DbnOKpbvKqyA6
+cwo4r9AF4dRLa0sE3zjmNOBZAV0Uu6fhcaZgvc+1t82P+vrZblP9pXOdFGGoed4S
+bskLL/C1oXd6+Q89rCFIkkHJXDJvxRUZNe0BA3cCgYBZ9akGxltr1NTOM9dv5AHp
+/lgGXyZIxSuC7juajFcfq2ATb8eRgUgNKNZSuxa635iNntXWxMWTaGXHSzk9wQey
+Eh+KcZ4fthmKQcDrgV+8XtUYnKnU+3yoZShI1AaQ3CngTjh9gLMjN5LoryaqMiJl
+qPl2wnUBHU3EDzla0Sjm1QKBgQCPxodu6l+OxImzRZScznOWwt+rkjp6NrRPv3R6
+KaUE2BLkQkETKAErRkBlWH290BpIzuYzyPAi2e9fdoWAhBHDV6tOzT368FPAwbBA
+zF66qjun8MopZdDGsnhab48gKe7z9j8pQfO54zFeE3+03Tz6EzoW+eb8gtU7LXgl
+LqWL3wKBgQC0+0lRsGfnsPgRDaAAwoHCxP6h3DNMcRxNMca1aI78ENJZUXfY3XzG
+yOE1i/1/SV1NQD4O1BlEqNTaDlM0yw0UttMvOPrI+ZC9hZIbCxVfXGZ7xKqIL+Vo
+nG64GxSZh7M6pQHzUjlqTpsr8JaG6O7ODQtlYPHwNw24j7YGvxfk7A==
+-----END RSA PRIVATE KEY-----
diff --git a/data-plane/contract/pom.xml b/data-plane/contract/pom.xml
index 68b7e5c3d9..5eb516374f 100644
--- a/data-plane/contract/pom.xml
+++ b/data-plane/contract/pom.xml
@@ -39,16 +39,4 @@
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.ec4j.maven</groupId>
-        <artifactId>editorconfig-maven-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
diff --git a/data-plane/dispatcher-loom/src/main/java/dev/knative/eventing/kafka/broker/dispatcherloom/LoomKafkaConsumer.java b/data-plane/dispatcher-loom/src/main/java/dev/knative/eventing/kafka/broker/dispatcherloom/LoomKafkaConsumer.java
index 164913d822..74fd5fbb2d 100644
--- a/data-plane/dispatcher-loom/src/main/java/dev/knative/eventing/kafka/broker/dispatcherloom/LoomKafkaConsumer.java
+++ b/data-plane/dispatcher-loom/src/main/java/dev/knative/eventing/kafka/broker/dispatcherloom/LoomKafkaConsumer.java
@@ -27,6 +27,7 @@
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
@@ -44,7 +45,6 @@ public class LoomKafkaConsumer implements ReactiveKafkaConsumer {
private final Consumer consumer;
private final BlockingQueue taskQueue;
private final AtomicBoolean isClosed;
- private final AtomicBoolean isFinished;
private final Thread taskRunnerThread;
private final Promise closePromise = Promise.promise();
@@ -52,7 +52,6 @@ public LoomKafkaConsumer(Vertx vertx, Consumer consumer) {
this.consumer = consumer;
this.taskQueue = new LinkedBlockingQueue<>();
this.isClosed = new AtomicBoolean(false);
- this.isFinished = new AtomicBoolean(false);
if (Boolean.parseBoolean(System.getenv("ENABLE_VIRTUAL_THREADS"))) {
this.taskRunnerThread = Thread.ofVirtual().start(this::processTaskQueue);
@@ -74,14 +73,15 @@ private void processTaskQueue() {
// Process queue elements until this is closed and the tasks queue is empty
while (!isClosed.get() || !taskQueue.isEmpty()) {
try {
- taskQueue.take().run();
+ Runnable task = taskQueue.poll(2000, TimeUnit.MILLISECONDS);
+ if (task != null) {
+ task.run();
+ }
} catch (InterruptedException e) {
logger.debug("Interrupted while waiting for task", e);
break;
}
}
-
- isFinished.set(true);
}
@Override
@@ -126,16 +126,6 @@ public Future<Void> close() {
}
logger.debug("Queue is empty");
- if (!isFinished.get()) {
- logger.debug("Background thread not finished yet, waiting for it to complete");
- Thread.sleep(2000L);
-
- if (!isFinished.get()) {
- logger.debug("Background thread still not finished yet, interrupting background thread");
- taskRunnerThread.interrupt();
- }
- }
-
taskRunnerThread.join();
closePromise.tryComplete();
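
Note on the consumer change above: replacing the blocking `take()` with a bounded `poll(2000, TimeUnit.MILLISECONDS)` lets the task-runner thread periodically re-check `isClosed` between waits, so `close()` no longer needs the `isFinished` flag or a fallback interrupt. A minimal, self-contained sketch of the same shutdown pattern follows; names are illustrative only, not code from this repository.

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Illustration of the bounded-poll worker-loop shutdown pattern:
 * the worker drains a task queue with a timed poll so it can observe
 * the closed flag and exit on its own, without being interrupted.
 */
public class BoundedPollWorker {

    private final BlockingQueue<Runnable> tasks = new LinkedBlockingQueue<>();
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final Thread worker = new Thread(this::run, "bounded-poll-worker");

    public BoundedPollWorker() {
        worker.start();
    }

    public void submit(Runnable task) {
        tasks.add(task);
    }

    private void run() {
        // Keep draining until close() was called AND the queue is empty.
        while (!closed.get() || !tasks.isEmpty()) {
            try {
                // A bounded poll returns null on timeout, letting the loop
                // re-check the closed flag instead of blocking forever.
                Runnable task = tasks.poll(2000, TimeUnit.MILLISECONDS);
                if (task != null) {
                    task.run();
                }
            } catch (InterruptedException e) {
                break;
            }
        }
    }

    public void close() throws InterruptedException {
        closed.set(true);
        // No interrupt needed: the worker exits once the queue is drained.
        worker.join();
    }

    public static void main(String[] args) throws InterruptedException {
        BoundedPollWorker w = new BoundedPollWorker();
        w.submit(() -> System.out.println("task ran"));
        w.close();
    }
}
```
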
diff --git a/data-plane/pom.xml b/data-plane/pom.xml
index 3f8fe445ae..4f89e89955 100644
--- a/data-plane/pom.xml
+++ b/data-plane/pom.xml
@@ -35,7 +35,6 @@
3.4.2
3.2.5
3.5.1
-    <maven.editorconfig.plugin.version>0.1.1</maven.editorconfig.plugin.version>
0.8.12
3.5.0
2.0.0
@@ -460,27 +459,6 @@
-      <plugin>
-        <groupId>org.ec4j.maven</groupId>
-        <artifactId>editorconfig-maven-plugin</artifactId>
-        <version>${maven.editorconfig.plugin.version}</version>
-        <executions>
-          <execution>
-            <id>check</id>
-            <phase>verify</phase>
-            <goals>
-              <goal>check</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <excludes>
-            <exclude>.dockerignore</exclude>
-            <exclude>config/***</exclude>
-            <exclude>.mvn/***</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
diff --git a/data-plane/profiler/run.sh b/data-plane/profiler/run.sh
index 968c958d5c..848f503cf5 100755
--- a/data-plane/profiler/run.sh
+++ b/data-plane/profiler/run.sh
@@ -52,7 +52,7 @@ echo "Async profiler URL: ${ASYNC_PROFILER_URL}"
echo "Kafka URL: ${KAFKA_URL}"
# Build the data plane.
-cd "${PROJECT_ROOT_DIR}" && ./mvnw package -DskipTests -Dlicense.skip -Deditorconfig.skip -B -U --no-transfer-progress && cd - || exit 1
+cd "${PROJECT_ROOT_DIR}" && ./mvnw package -DskipTests -Dlicense.skip -B -U --no-transfer-progress && cd - || exit 1
# Download async profiler.
rm -rf async-profiler
diff --git a/data-plane/receiver-loom/src/main/java/dev/knative/eventing/kafka/broker/receiverloom/LoomKafkaProducer.java b/data-plane/receiver-loom/src/main/java/dev/knative/eventing/kafka/broker/receiverloom/LoomKafkaProducer.java
index 402760854e..a1ba796715 100644
--- a/data-plane/receiver-loom/src/main/java/dev/knative/eventing/kafka/broker/receiverloom/LoomKafkaProducer.java
+++ b/data-plane/receiver-loom/src/main/java/dev/knative/eventing/kafka/broker/receiverloom/LoomKafkaProducer.java
@@ -26,6 +26,7 @@
import java.util.Objects;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
@@ -84,7 +85,11 @@ private void sendFromQueue() {
// Process queue elements until this is closed and the tasks queue is empty
while (!isClosed.get() || !eventQueue.isEmpty()) {
try {
- final var recordPromise = eventQueue.take();
+ final var recordPromise = eventQueue.poll(2000, TimeUnit.MILLISECONDS);
+ if (recordPromise == null) {
+ continue;
+ }
+
final var startedSpan = this.tracer == null
? null
: this.tracer.prepareSendMessage(recordPromise.getContext(), recordPromise.getRecord());
@@ -140,8 +145,6 @@ public Future<Void> close() {
logger.debug("Waiting for the eventQueue to become empty");
Thread.sleep(2000L);
}
- logger.debug("Interrupting sendFromQueueThread thread");
- sendFromQueueThread.interrupt();
logger.debug("Waiting for sendFromQueueThread thread to complete");
sendFromQueueThread.join();
logger.debug("Closing the producer");
diff --git a/go.mod b/go.mod
index 7430fb7592..7838fbccad 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module knative.dev/eventing-kafka-broker
go 1.22.0
require (
- github.com/IBM/sarama v1.43.1
+ github.com/IBM/sarama v1.43.3
github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.15.2
github.com/cloudevents/sdk-go/v2 v2.15.2
github.com/google/go-cmp v0.6.0
@@ -35,10 +35,10 @@ require (
k8s.io/apiserver v0.30.3
k8s.io/client-go v0.30.3
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
- knative.dev/eventing v0.42.1-0.20240827090532-ecae8953ff0c
- knative.dev/hack v0.0.0-20240814130635-06f7aff93954
- knative.dev/pkg v0.0.0-20240815051656-89743d9bbf7c
- knative.dev/reconciler-test v0.0.0-20240820100420-036ce14b8617
+ knative.dev/eventing v0.42.1-0.20240926123447-e7fca7646f4a
+ knative.dev/hack v0.0.0-20240909014011-fc6a8452af6d
+ knative.dev/pkg v0.0.0-20240930065954-503173341499
+ knative.dev/reconciler-test v0.0.0-20240926123451-87d857060042
sigs.k8s.io/controller-runtime v0.12.3
sigs.k8s.io/yaml v1.4.0
)
@@ -59,7 +59,7 @@ require (
github.com/cloudevents/sdk-go/sql/v2 v2.15.2 // indirect
github.com/coreos/go-oidc/v3 v3.9.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/eapache/go-resiliency v1.6.0 // indirect
+ github.com/eapache/go-resiliency v1.7.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
@@ -72,7 +72,7 @@ require (
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
- github.com/gobuffalo/flect v1.0.2 // indirect
+ github.com/gobuffalo/flect v1.0.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
@@ -90,7 +90,7 @@ require (
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.8 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -113,22 +113,22 @@ require (
github.com/wavesoftware/go-ensure v1.0.0 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
- go.uber.org/automaxprocs v1.5.3 // indirect
- golang.org/x/crypto v0.26.0 // indirect
- golang.org/x/mod v0.20.0 // indirect
- golang.org/x/net v0.28.0 // indirect
+ go.uber.org/automaxprocs v1.6.0 // indirect
+ golang.org/x/crypto v0.27.0 // indirect
+ golang.org/x/mod v0.21.0 // indirect
+ golang.org/x/net v0.29.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.24.0 // indirect
- golang.org/x/term v0.23.0 // indirect
- golang.org/x/text v0.17.0 // indirect
+ golang.org/x/sys v0.25.0 // indirect
+ golang.org/x/term v0.24.0 // indirect
+ golang.org/x/text v0.18.0 // indirect
golang.org/x/time v0.6.0 // indirect
- golang.org/x/tools v0.24.0 // indirect
+ golang.org/x/tools v0.25.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.183.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240808171019-573a1156607a // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240808171019-573a1156607a // indirect
- google.golang.org/grpc v1.65.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
+ google.golang.org/grpc v1.67.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index a0ac63315f..73610c12ba 100644
--- a/go.sum
+++ b/go.sum
@@ -60,8 +60,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/IBM/sarama v1.43.1 h1:Z5uz65Px7f4DhI/jQqEm/tV9t8aU+JUdTyW/K/fCXpA=
-github.com/IBM/sarama v1.43.1/go.mod h1:GG5q1RURtDNPz8xxJs3mgX6Ytak8Z9eLhAkJPObe2xE=
+github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA=
+github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -150,8 +150,8 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30=
-github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
+github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA=
+github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws=
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
@@ -275,8 +275,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
-github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
+github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
+github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -454,8 +454,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -709,8 +709,8 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
-go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -741,8 +741,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -777,8 +777,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
-golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -831,8 +831,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -919,8 +919,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -928,8 +928,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -943,8 +943,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1010,8 +1010,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
-golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
+golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1076,10 +1076,10 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20240808171019-573a1156607a h1:KyUe15n7B1YCu+kMmPtlXxgkLQbp+Dw0tCRZf9Sd+CE=
-google.golang.org/genproto/googleapis/api v0.0.0-20240808171019-573a1156607a/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240808171019-573a1156607a h1:EKiZZXueP9/T68B8Nl0GAx9cjbQnCId0yP3qPMgaaHs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240808171019-573a1156607a/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1095,8 +1095,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
+google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1214,14 +1214,14 @@ k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/eventing v0.42.1-0.20240827090532-ecae8953ff0c h1:K8OG+CT1NbgRojpKU49SWDpAQd/4YCElJZH5LJLwBR4=
-knative.dev/eventing v0.42.1-0.20240827090532-ecae8953ff0c/go.mod h1:Clx8z37Nwg321H9+vGNxp5C6bVdo4l4XM5g6T5CgZVI=
-knative.dev/hack v0.0.0-20240814130635-06f7aff93954 h1:dGMK5VoL75szvrYQTL9NqhPYHu1f5dGaXx1hJI8fAFM=
-knative.dev/hack v0.0.0-20240814130635-06f7aff93954/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY=
-knative.dev/pkg v0.0.0-20240815051656-89743d9bbf7c h1:2crXVk4FG0dSG6WHaIT+WKbUzn7qG2wn0AfYmvA22zs=
-knative.dev/pkg v0.0.0-20240815051656-89743d9bbf7c/go.mod h1:cI2RPEEHZk+/dBpfHobs0aBdPA1mMZVUVWnGAc8NSzM=
-knative.dev/reconciler-test v0.0.0-20240820100420-036ce14b8617 h1:UW3CH3wWocQiQKQYxf8gdDw9EEOgL31GU8yXY4w5lE8=
-knative.dev/reconciler-test v0.0.0-20240820100420-036ce14b8617/go.mod h1:GmL2taVxYoXADdFOl534FcW+rYDQP6LDjWj82Q5yQDI=
+knative.dev/eventing v0.42.1-0.20240926123447-e7fca7646f4a h1:HnJ8kus8avX0oMuzA1K3mKKV+mZJ32kJGqvtIYcoeEw=
+knative.dev/eventing v0.42.1-0.20240926123447-e7fca7646f4a/go.mod h1:CguA8wPeeeED9ZIAJ+NqCo8fGj1W3gkEvTQs7Enk/oo=
+knative.dev/hack v0.0.0-20240909014011-fc6a8452af6d h1:mgROhGJG3+g0SBkaG4Y2HxrIOLN3ZZcN4+IFZla+Zqs=
+knative.dev/hack v0.0.0-20240909014011-fc6a8452af6d/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY=
+knative.dev/pkg v0.0.0-20240930065954-503173341499 h1:5xOSRTSjmakkXWtFFWtNTlNcks0FTN7T7wHeFrWR0qg=
+knative.dev/pkg v0.0.0-20240930065954-503173341499/go.mod h1:Mh16+83vjH4yF2fTRQLiErZ1RTawIu5HMTKFVCnxx3U=
+knative.dev/reconciler-test v0.0.0-20240926123451-87d857060042 h1:iex7NiH53E+EDGdC7ekbr3YL0qVlONHvZOYLra76y1Y=
+knative.dev/reconciler-test v0.0.0-20240926123451-87d857060042/go.mod h1:PXOqfSSDHzaVPXrpEPlxsOSQRIQJGnSrj2IuVQh3Kas=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/test/config/monitoring.yaml b/test/config/monitoring.yaml
index 9d9ec3dbdd..ca52448411 100644
--- a/test/config/monitoring.yaml
+++ b/test/config/monitoring.yaml
@@ -38,6 +38,7 @@ spec:
metadata:
labels:
app: zipkin
+ sidecar.istio.io/inject: "false"
annotations:
sidecar.istio.io/inject: "false"
spec:
diff --git a/test/e2e_new/broker_test.go b/test/e2e_new/broker_test.go
index 36aa891e1a..219fceac99 100644
--- a/test/e2e_new/broker_test.go
+++ b/test/e2e_new/broker_test.go
@@ -332,7 +332,6 @@ func TestBrokerSupportsAuthZ(t *testing.T) {
knative.WithLoggingConfig,
knative.WithTracingConfig,
k8s.WithEventListener,
- environment.WithPollTimings(4*time.Second, 12*time.Minute),
environment.Managed(t),
eventshub.WithTLS(t),
)
diff --git a/test/e2e_new/sink_auth_test.go b/test/e2e_new/sink_auth_test.go
index 5b9b41ba83..6b70c70adb 100644
--- a/test/e2e_new/sink_auth_test.go
+++ b/test/e2e_new/sink_auth_test.go
@@ -34,6 +34,8 @@ import (
"knative.dev/reconciler-test/pkg/feature"
"knative.dev/reconciler-test/pkg/k8s"
"knative.dev/reconciler-test/pkg/knative"
+
+ "knative.dev/eventing/test/rekt/features/authz"
)
func TestKafkaSinkSupportsOIDC(t *testing.T) {
@@ -56,3 +58,23 @@ func TestKafkaSinkSupportsOIDC(t *testing.T) {
env.TestSet(ctx, t, oidc.AddressableOIDCConformance(kafkasink.GVR(), "KafkaSink", sink, env.Namespace()))
}
+
+func TestKafkaSinkSupportsAuthZ(t *testing.T) {
+ t.Parallel()
+
+ ctx, env := global.Environment(
+ knative.WithKnativeNamespace(system.Namespace()),
+ knative.WithLoggingConfig,
+ knative.WithTracingConfig,
+ k8s.WithEventListener,
+ environment.Managed(t),
+ eventshub.WithTLS(t),
+ )
+
+ topic := feature.MakeRandomK8sName("topic")
+ sink := feature.MakeRandomK8sName("kafkasink")
+ env.Prerequisite(ctx, t, kafkatopic.GoesReady(topic))
+ env.Prerequisite(ctx, t, kafkasink.GoesReady(sink, topic, testpkg.BootstrapServersPlaintextArr))
+
+ env.TestSet(ctx, t, authz.AddressableAuthZConformance(kafkasink.GVR(), "KafkaSink", sink))
+}
diff --git a/test/e2e_new_channel/kafka_channel_test.go b/test/e2e_new_channel/kafka_channel_test.go
index 1b651030d3..9ad97e9639 100644
--- a/test/e2e_new_channel/kafka_channel_test.go
+++ b/test/e2e_new_channel/kafka_channel_test.go
@@ -37,6 +37,9 @@ import (
"knative.dev/eventing-kafka-broker/test/rekt/features"
"knative.dev/eventing-kafka-broker/test/rekt/features/kafkachannel"
kafkachannelresource "knative.dev/eventing-kafka-broker/test/rekt/resources/kafkachannel"
+ channelresource "knative.dev/eventing/test/rekt/resources/channel"
+
+ "knative.dev/eventing/test/rekt/features/authz"
)
const (
@@ -111,6 +114,42 @@ func TestKafkaChannelOIDC(t *testing.T) {
env.TestSet(ctx, t, oidc.AddressableOIDCConformance(kafkachannelresource.GVR(), "KafkaChannel", name, env.Namespace()))
}
+func TestChannelWithBackingKafkaChannelSupportsAuthZ(t *testing.T) {
+ t.Parallel()
+
+ ctx, env := global.Environment(
+ knative.WithKnativeNamespace(system.Namespace()),
+ knative.WithLoggingConfig,
+ knative.WithTracingConfig,
+ k8s.WithEventListener,
+ environment.Managed(t),
+ eventshub.WithTLS(t),
+ )
+
+ name := feature.MakeRandomK8sName("channel")
+ env.Prerequisite(ctx, t, channel.GoesReady(name))
+
+ env.TestSet(ctx, t, authz.AddressableAuthZConformance(channelresource.GVR(), "Channel", name))
+}
+
+func TestKafkaChannelSupportsAuthZ(t *testing.T) {
+ t.Parallel()
+
+ ctx, env := global.Environment(
+ knative.WithKnativeNamespace(system.Namespace()),
+ knative.WithLoggingConfig,
+ knative.WithTracingConfig,
+ k8s.WithEventListener,
+ environment.Managed(t),
+ eventshub.WithTLS(t),
+ )
+
+ name := feature.MakeRandomK8sName("kafkachannel")
+ env.Prerequisite(ctx, t, channel.ImplGoesReady(name))
+
+ env.TestSet(ctx, t, authz.AddressableAuthZConformance(kafkachannelresource.GVR(), "KafkaChannel", name))
+}
+
func TestKafkaChannelKedaScaling(t *testing.T) {
t.Parallel()
diff --git a/test/kafka/kafka-ephemeral.yaml b/test/kafka/kafka-ephemeral.yaml
index 7bf55f0b66..1c19bd9090 100644
--- a/test/kafka/kafka-ephemeral.yaml
+++ b/test/kafka/kafka-ephemeral.yaml
@@ -18,7 +18,7 @@ metadata:
name: my-cluster
spec:
kafka:
- version: 3.7.1
+ version: 3.8.0
replicas: 3
listeners:
# PLAINTEXT
@@ -63,7 +63,7 @@ spec:
transaction.state.log.replication.factor: 3
transaction.state.log.min.isr: 2
auto.create.topics.enable: false
- inter.broker.protocol.version: "3.7"
+ inter.broker.protocol.version: "3.8"
storage:
type: ephemeral
zookeeper:
diff --git a/test/kafka/strimzi-cluster-operator.yaml b/test/kafka/strimzi-cluster-operator.yaml
index 11103e5a8e..c34bcc75a3 100644
--- a/test/kafka/strimzi-cluster-operator.yaml
+++ b/test/kafka/strimzi-cluster-operator.yaml
@@ -1,4 +1,225 @@
apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: strimzi-cluster-operator-entity-operator-delegation
+ labels:
+ app: strimzi
+# The Entity Operator cluster role must be bound to the cluster operator service account so that it can delegate the cluster role to the Entity Operator.
+# This must be done to avoid escalating privileges which would be blocked by Kubernetes.
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: myproject
+roleRef:
+ kind: ClusterRole
+ name: strimzi-entity-operator
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: strimzipodsets.core.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: core.strimzi.io
+ names:
+ kind: StrimziPodSet
+ listKind: StrimziPodSetList
+ singular: strimzipodset
+ plural: strimzipodsets
+ shortNames:
+ - sps
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Pods
+ description: Number of pods managed by the StrimziPodSet
+ jsonPath: .status.pods
+ type: integer
+ - name: Ready Pods
+ description: Number of ready pods managed by the StrimziPodSet
+ jsonPath: .status.readyPods
+ type: integer
+ - name: Current Pods
+ description: Number of up-to-date pods managed by the StrimziPodSet
+ jsonPath: .status.currentPods
+ type: integer
+ - name: Age
+ description: Age of the StrimziPodSet
+ jsonPath: .metadata.creationTimestamp
+ type: date
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ selector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ description: "Selector is a label query which matches all the pods managed by this `StrimziPodSet`. Only `matchLabels` is supported. If `matchExpressions` is set, it will be ignored."
+ pods:
+ type: array
+ items:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: The Pods managed by this StrimziPodSet.
+ required:
+ - selector
+ - pods
+ description: The specification of the StrimziPodSet.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ pods:
+ type: integer
+ description: Number of pods managed by this `StrimziPodSet` resource.
+ readyPods:
+ type: integer
+ description: Number of pods managed by this `StrimziPodSet` resource that are ready.
+ currentPods:
+ type: integer
+ description: Number of pods managed by this `StrimziPodSet` resource that have the current revision.
+ description: The status of the StrimziPodSet.
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: strimzi-cluster-operator
+ labels:
+ app: strimzi
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: myproject
+roleRef:
+ kind: ClusterRole
+ name: strimzi-cluster-operator-global
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: strimzi-entity-operator
+ labels:
+ app: strimzi
+rules:
+ - apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ # The Entity Operator contains the Topic Operator which needs to access and manage KafkaTopic resources
+ - kafkatopics
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ # The Entity Operator contains the User Operator which needs to access and manage KafkaUser resources
+ - kafkausers
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ # The Entity Operator contains the Topic Operator which needs to access and manage KafkaTopic resources
+ - kafkatopics/status
+ # The Entity Operator contains the User Operator which needs to access and manage KafkaUser resources
+ - kafkausers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ # The entity operator user-operator needs to access and manage secrets to store generated credentials
+ - secrets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: strimzi-cluster-operator-namespaced
@@ -162,19 +383,19 @@ rules:
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
- name: kafkamirrormakers.kafka.strimzi.io
+ name: kafkarebalances.kafka.strimzi.io
labels:
app: strimzi
strimzi.io/crd-install: "true"
spec:
group: kafka.strimzi.io
names:
- kind: KafkaMirrorMaker
- listKind: KafkaMirrorMakerList
- singular: kafkamirrormaker
- plural: kafkamirrormakers
+ kind: KafkaRebalance
+ listKind: KafkaRebalanceList
+ singular: kafkarebalance
+ plural: kafkarebalances
shortNames:
- - kmm
+ - kr
categories:
- strimzi
scope: Namespaced
@@ -186,26 +407,176 @@ spec:
storage: true
subresources:
status: {}
- scale:
- specReplicasPath: .spec.replicas
- statusReplicasPath: .status.replicas
- labelSelectorPath: .status.labelSelector
additionalPrinterColumns:
- - name: Desired replicas
- description: The desired number of Kafka MirrorMaker replicas
- jsonPath: .spec.replicas
- type: integer
- - name: Consumer Bootstrap Servers
- description: The boostrap servers for the consumer
- jsonPath: .spec.consumer.bootstrapServers
+ - name: Cluster
+ description: The name of the Kafka cluster this resource rebalances
+ jsonPath: .metadata.labels.strimzi\.io/cluster
type: string
- priority: 1
- - name: Producer Bootstrap Servers
- description: The boostrap servers for the producer
- jsonPath: .spec.producer.bootstrapServers
+ - name: PendingProposal
+ description: A proposal has been requested from Cruise Control
+ jsonPath: ".status.conditions[?(@.type==\"PendingProposal\")].status"
type: string
- priority: 1
- - name: Ready
+ - name: ProposalReady
+ description: A proposal is ready and waiting for approval
+ jsonPath: ".status.conditions[?(@.type==\"ProposalReady\")].status"
+ type: string
+ - name: Rebalancing
+ description: Cruise Control is doing the rebalance
+ jsonPath: ".status.conditions[?(@.type==\"Rebalancing\")].status"
+ type: string
+ - name: Ready
+ description: The rebalance is complete
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ - name: NotReady
+ description: There is an error on the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"NotReady\")].status"
+ type: string
+ - name: Stopped
+ description: Processing the proposal or running rebalancing was stopped
+ jsonPath: ".status.conditions[?(@.type==\"Stopped\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ mode:
+ type: string
+ enum:
+ - full
+ - add-brokers
+ - remove-brokers
+ description: "Mode to run the rebalancing. The supported modes are `full`, `add-brokers`, `remove-brokers`.\nIf not specified, the `full` mode is used by default. \n\n* `full` mode runs the rebalancing across all the brokers in the cluster.\n* `add-brokers` mode can be used after scaling up the cluster to move some replicas to the newly added brokers.\n* `remove-brokers` mode can be used before scaling down the cluster to move replicas out of the brokers to be removed.\n"
+ brokers:
+ type: array
+ items:
+ type: integer
+ description: The list of newly added brokers in case of scaling up or the ones to be removed in case of scaling down to use for rebalancing. This list can be used only with rebalancing mode `add-brokers` and `removed-brokers`. It is ignored with `full` mode.
+ goals:
+ type: array
+ items:
+ type: string
+ description: "A list of goals, ordered by decreasing priority, to use for generating and executing the rebalance proposal. The supported goals are available at https://github.com/linkedin/cruise-control#goals. If an empty goals list is provided, the goals declared in the default.goals Cruise Control configuration parameter are used."
+ skipHardGoalCheck:
+ type: boolean
+ description: Whether to allow the hard goals specified in the Kafka CR to be skipped in optimization proposal generation. This can be useful when some of those hard goals are preventing a balance solution being found. Default is false.
+ rebalanceDisk:
+ type: boolean
+ description: "Enables intra-broker disk balancing, which balances disk space utilization between disks on the same broker. Only applies to Kafka deployments that use JBOD storage with multiple disks. When enabled, inter-broker balancing is disabled. Default is false."
+ excludedTopics:
+ type: string
+ description: A regular expression where any matching topics will be excluded from the calculation of optimization proposals. This expression will be parsed by the java.util.regex.Pattern class; for more information on the supported format consult the documentation for that class.
+ concurrentPartitionMovementsPerBroker:
+ type: integer
+ minimum: 0
+ description: The upper bound of ongoing partition replica movements going into/out of each broker. Default is 5.
+ concurrentIntraBrokerPartitionMovements:
+ type: integer
+ minimum: 0
+ description: The upper bound of ongoing partition replica movements between disks within each broker. Default is 2.
+ concurrentLeaderMovements:
+ type: integer
+ minimum: 0
+ description: The upper bound of ongoing partition leadership movements. Default is 1000.
+ replicationThrottle:
+ type: integer
+ minimum: 0
+ description: "The upper bound, in bytes per second, on the bandwidth used to move replicas. There is no limit by default."
+ replicaMovementStrategies:
+ type: array
+ items:
+ type: string
+ description: "A list of strategy class names used to determine the execution order for the replica movements in the generated optimization proposal. By default BaseReplicaMovementStrategy is used, which will execute the replica movements in the order that they were generated."
+ description: The specification of the Kafka rebalance.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ sessionId:
+ type: string
+ description: The session identifier for requests to Cruise Control pertaining to this KafkaRebalance resource. This is used by the Kafka Rebalance operator to track the status of ongoing rebalancing operations.
+ optimizationResult:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: A JSON object describing the optimization result.
+ description: The status of the Kafka rebalance.
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkatopics.kafka.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: kafka.strimzi.io
+ names:
+ kind: KafkaTopic
+ listKind: KafkaTopicList
+ singular: kafkatopic
+ plural: kafkatopics
+ shortNames:
+ - kt
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Cluster
+ description: The name of the Kafka cluster this topic belongs to
+ jsonPath: .metadata.labels.strimzi\.io/cluster
+ type: string
+ - name: Partitions
+ description: The desired number of partitions in the topic
+ jsonPath: .spec.partitions
+ type: integer
+ - name: Replication factor
+ description: The desired number of replicas of each partition
+ jsonPath: .spec.replicas
+ type: integer
+ - name: Ready
description: The state of the custom resource
jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
type: string
@@ -224,7407 +595,4082 @@ spec:
spec:
type: object
properties:
- version:
+ topicName:
type: string
- description: The Kafka MirrorMaker version. Defaults to the latest version. Consult the documentation to understand the process required to upgrade or downgrade the version.
+ description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name.
+ partitions:
+ type: integer
+ minimum: 1
+ description: "The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`."
replicas:
type: integer
- minimum: 0
- description: The number of pods in the `Deployment`.
- image:
+ minimum: 1
+ maximum: 32767
+ description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: The topic configuration.
+ description: The specification of the topic.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ topicName:
type: string
- description: "The container image used for Kafka MirrorMaker pods. If no image name is explicitly specified, it is determined based on the `spec.version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
- consumer:
+ description: Topic name.
+ topicId:
+ type: string
+ description: "The topic's id. For a KafkaTopic with the ready condition, this will change only if the topic gets deleted and recreated with the same name."
+ replicasChange:
type: object
properties:
- numStreams:
- type: integer
- minimum: 1
- description: Specifies the number of consumer stream threads to create.
- offsetCommitInterval:
+ targetReplicas:
type: integer
- description: Specifies the offset auto-commit interval in ms. Default value is 60000.
- bootstrapServers:
+ description: The target replicas value requested by the user. This may be different from .spec.replicas when a change is ongoing.
+ state:
type: string
- description: A list of host:port pairs for establishing the initial connection to the Kafka cluster.
- groupId:
+ enum:
+ - pending
+ - ongoing
+ description: "Current state of the replicas change operation. This can be `pending`, when the change has been requested, or `ongoing`, when the change has been successfully submitted to Cruise Control."
+ message:
type: string
- description: A unique string that identifies the consumer group this consumer belongs to.
- authentication:
- type: object
- properties:
- accessToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
- accessTokenIsJwt:
- type: boolean
- description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
- audience:
- type: string
- description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
- certificateAndKey:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the Secret.
- key:
- type: string
- description: The name of the private key in the Secret.
- required:
- - secretName
- - certificate
- - key
- description: Reference to the `Secret` which holds the certificate and private key pair.
- clientId:
- type: string
- description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- clientSecret:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- connectTimeoutSeconds:
- type: integer
- description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
- disableTlsHostnameVerification:
- type: boolean
- description: Enable or disable TLS hostname verification. Default value is `false`.
- enableMetrics:
- type: boolean
- description: Enable or disable OAuth metrics. Default value is `false`.
- httpRetries:
- type: integer
- description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
- httpRetryPauseMs:
- type: integer
- description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
- includeAcceptHeader:
- type: boolean
- description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
- maxTokenExpirySeconds:
- type: integer
- description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
- passwordSecret:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the password.
- password:
- type: string
- description: The name of the key in the Secret under which the password is stored.
- required:
- - secretName
- - password
- description: Reference to the `Secret` which holds the password.
- readTimeoutSeconds:
- type: integer
- description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
- refreshToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
- scope:
- type: string
- description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
- tlsTrustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection to the OAuth server.
- tokenEndpointUri:
- type: string
- description: Authorization server token endpoint URI.
- type:
- type: string
- enum:
- - tls
- - scram-sha-256
- - scram-sha-512
- - plain
- - oauth
- description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
- username:
- type: string
- description: Username used for the authentication.
- required:
- - type
- description: Authentication configuration for connecting to the cluster.
- tls:
- type: object
- properties:
- trustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection.
- description: TLS configuration for connecting MirrorMaker to the cluster.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The MirrorMaker consumer config. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, group.id, sasl., security., interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
- required:
- - bootstrapServers
- - groupId
- description: Configuration of source cluster.
- producer:
- type: object
- properties:
- bootstrapServers:
+ description: Message for the user related to the replicas change request. This may contain transient error messages that would disappear on periodic reconciliations.
+ sessionId:
type: string
- description: A list of host:port pairs for establishing the initial connection to the Kafka cluster.
- abortOnSendFailure:
- type: boolean
- description: Flag to set the MirrorMaker to exit on a failed send. Default value is `true`.
- authentication:
- type: object
- properties:
- accessToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
- accessTokenIsJwt:
- type: boolean
- description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
- audience:
- type: string
- description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
- certificateAndKey:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the Secret.
- key:
- type: string
- description: The name of the private key in the Secret.
- required:
- - secretName
- - certificate
- - key
- description: Reference to the `Secret` which holds the certificate and private key pair.
- clientId:
- type: string
- description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- clientSecret:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- connectTimeoutSeconds:
- type: integer
- description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
- disableTlsHostnameVerification:
- type: boolean
- description: Enable or disable TLS hostname verification. Default value is `false`.
- enableMetrics:
- type: boolean
- description: Enable or disable OAuth metrics. Default value is `false`.
- httpRetries:
- type: integer
- description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
- httpRetryPauseMs:
- type: integer
- description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
- includeAcceptHeader:
- type: boolean
- description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
- maxTokenExpirySeconds:
- type: integer
- description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
- passwordSecret:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the password.
- password:
- type: string
- description: The name of the key in the Secret under which the password is stored.
- required:
- - secretName
- - password
- description: Reference to the `Secret` which holds the password.
- readTimeoutSeconds:
- type: integer
- description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
- refreshToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
- scope:
- type: string
- description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
- tlsTrustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection to the OAuth server.
- tokenEndpointUri:
- type: string
- description: Authorization server token endpoint URI.
- type:
- type: string
- enum:
- - tls
- - scram-sha-256
- - scram-sha-512
- - plain
- - oauth
- description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
- username:
- type: string
- description: Username used for the authentication.
- required:
- - type
- description: Authentication configuration for connecting to the cluster.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The MirrorMaker producer config. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, sasl., security., interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
- tls:
- type: object
- properties:
- trustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection.
- description: TLS configuration for connecting MirrorMaker to the cluster.
- required:
- - bootstrapServers
- description: Configuration of target cluster.
- resources:
+ description: The session identifier for replicas change requests pertaining to this KafkaTopic resource. This is used by the Topic Operator to track the status of `ongoing` replicas change operations.
+ description: Replication factor change status.
+ description: The status of the topic.
+ - name: v1beta1
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Cluster
+ description: The name of the Kafka cluster this topic belongs to
+ jsonPath: .metadata.labels.strimzi\.io/cluster
+ type: string
+ - name: Partitions
+ description: The desired number of partitions in the topic
+ jsonPath: .spec.partitions
+ type: integer
+ - name: Replication factor
+ description: The desired number of replicas of each partition
+ jsonPath: .spec.replicas
+ type: integer
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ topicName:
+ type: string
+ description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name.
+ partitions:
+ type: integer
+ minimum: 1
+ description: "The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`."
+ replicas:
+ type: integer
+ minimum: 1
+ maximum: 32767
+ description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve.
- whitelist:
+ description: The topic configuration.
+ description: The specification of the topic.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ topicName:
type: string
- description: "List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression `A\\|B`. Or, as a special case, you can mirror all topics using the regular expression `*`. You can also specify multiple regular expressions separated by commas."
- include:
+ description: Topic name.
+ topicId:
type: string
- description: "List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression `A\\|B`. Or, as a special case, you can mirror all topics using the regular expression `*`. You can also specify multiple regular expressions separated by commas."
- jvmOptions:
+ description: "The topic's id. For a KafkaTopic with the ready condition, this will change only if the topic gets deleted and recreated with the same name."
+ replicasChange:
type: object
properties:
- "-XX":
- additionalProperties:
- type: string
- type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
+ targetReplicas:
+ type: integer
+ description: The target replicas value requested by the user. This may be different from .spec.replicas when a change is ongoing.
+ state:
type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to to the JVM.
- "-Xms":
+ enum:
+ - pending
+ - ongoing
+ description: "Current state of the replicas change operation. This can be `pending`, when the change has been requested, or `ongoing`, when the change has been successfully submitted to Cruise Control."
+ message:
type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to to the JVM.
- gcLoggingEnabled:
- type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The system property name.
- value:
- type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for pods.
- logging:
+ description: Message for the user related to the replicas change request. This may contain transient error messages that would disappear on periodic reconciliations.
+ sessionId:
+ type: string
+ description: The session identifier for replicas change requests pertaining to this KafkaTopic resource. This is used by the Topic Operator to track the status of `ongoing` replicas change operations.
+ description: Replication factor change status.
+ description: The status of the topic.
+ - name: v1alpha1
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Cluster
+ description: The name of the Kafka cluster this topic belongs to
+ jsonPath: .metadata.labels.strimzi\.io/cluster
+ type: string
+ - name: Partitions
+ description: The desired number of partitions in the topic
+ jsonPath: .spec.partitions
+ type: integer
+ - name: Replication factor
+ description: The desired number of replicas of each partition
+ jsonPath: .spec.replicas
+ type: integer
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ topicName:
+ type: string
+ description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name.
+ partitions:
+ type: integer
+ minimum: 1
+ description: "The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`."
+ replicas:
+ type: integer
+ minimum: 1
+ maximum: 32767
+ description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
type: object
- properties:
- loggers:
- additionalProperties:
+ description: The topic configuration.
+ description: The specification of the topic.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
type: string
- type: object
- description: A Map from logger name to logger level.
- type:
- type: string
- enum:
- - inline
- - external
- description: "Logging type, must be either 'inline' or 'external'."
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: '`ConfigMap` entry where the logging configuration is stored. '
- required:
- - type
- description: Logging configuration for MirrorMaker.
- metricsConfig:
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ topicName:
+ type: string
+ description: Topic name.
+ topicId:
+ type: string
+ description: "The topic's id. For a KafkaTopic with the ready condition, this will change only if the topic gets deleted and recreated with the same name."
+ replicasChange:
type: object
properties:
- type:
+ targetReplicas:
+ type: integer
+ description: The target replicas value requested by the user. This may be different from .spec.replicas when a change is ongoing.
+ state:
type: string
enum:
- - jmxPrometheusExporter
- description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
- required:
- - type
- - valueFrom
- description: Metrics configuration.
- tracing:
- type: object
- properties:
- type:
+ - pending
+ - ongoing
+ description: "Current state of the replicas change operation. This can be `pending`, when the change has been requested, or `ongoing`, when the change has been successfully submitted to Cruise Control."
+ message:
type: string
- enum:
- - jaeger
- - opentelemetry
- description: "Type of the tracing used. Currently the only supported type is `opentelemetry` for OpenTelemetry tracing. As of Strimzi 0.37.0, `jaeger` type is not supported anymore and this option is ignored."
- required:
- - type
- description: The configuration of tracing in Kafka MirrorMaker.
- template:
- type: object
- properties:
- deployment:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- deploymentStrategy:
- type: string
- enum:
- - RollingUpdate
- - Recreate
- description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
- description: Template for Kafka MirrorMaker `Deployment`.
- pod:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- imagePullSecrets:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
- securityContext:
- type: object
- properties:
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- fsGroup:
- type: integer
- fsGroupChangePolicy:
- type: string
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- supplementalGroups:
- type: array
- items:
- type: integer
- sysctls:
- type: array
- items:
+ description: Message for the user related to the replicas change request. This may contain transient error messages that would disappear on periodic reconciliations.
+ sessionId:
+ type: string
+ description: The session identifier for replicas change requests pertaining to this KafkaTopic resource. This is used by the Topic Operator to track the status of `ongoing` replicas change operations.
+ description: Replication factor change status.
+ description: The status of the topic.
+
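For reference, the KafkaTopic schema added above admits resources such as the following minimal sketch (illustrative values only, not part of the diff; it assumes a Strimzi-managed Kafka cluster named `my-cluster` and uses the served `v1beta1` version shown above):

apiVersion: kafka.strimzi.io/v1beta1
kind: KafkaTopic
metadata:
  name: my-topic
  labels:
    strimzi.io/cluster: my-cluster  # label read by the "Cluster" printer column
spec:
  partitions: 3        # minimum 1; cannot be decreased after the topic is created
  replicas: 3          # minimum 1, maximum 32767
  config:              # free-form topic configuration (x-kubernetes-preserve-unknown-fields)
    retention.ms: 604800000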
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: strimzi-cluster-operator-kafka-client-delegation
+ labels:
+ app: strimzi
+# The Kafka clients cluster role must be bound to the cluster operator service account so that it can delegate the
+# cluster role to the Kafka clients using it for consuming from closest replica.
+# This must be done to avoid escalating privileges which would be blocked by Kubernetes.
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: myproject
+roleRef:
+ kind: ClusterRole
+ name: strimzi-kafka-client
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: strimzi-cluster-operator-leader-election
+ labels:
+ app: strimzi
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: myproject
+roleRef:
+ kind: ClusterRole
+ name: strimzi-cluster-operator-leader-election
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: strimzi-cluster-operator-leader-election
+ labels:
+ app: strimzi
+rules:
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ # The cluster operator needs to access and manage leases for leader election
+ # The "create" verb cannot be used with "resourceNames"
+ - leases
+ verbs:
+ - create
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ # The cluster operator needs to access and manage leases for leader election
+ - leases
+ resourceNames:
+ # The default RBAC files give the operator only access to the Lease resource named strimzi-cluster-operator
+ # If you want to use another resource name or resource namespace, you have to configure the RBAC resources accordingly
+ - strimzi-cluster-operator
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
+ - patch
+ - update
+
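As the inline comments note, this role only grants access to the default `strimzi-cluster-operator` lease; deployments that configure a different lease name (or namespace) need matching RBAC. A minimal sketch of the adjusted rule, assuming a hypothetical lease name `my-operator-lease`:

  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    resourceNames:
      - my-operator-lease   # must match the lease name the operator is configured to use
    verbs:
      - get
      - list
      - watch
      - delete
      - patch
      - update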
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkas.kafka.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: kafka.strimzi.io
+ names:
+ kind: Kafka
+ listKind: KafkaList
+ singular: kafka
+ plural: kafkas
+ shortNames:
+ - k
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Desired Kafka replicas
+ description: The desired number of Kafka replicas in the cluster
+ jsonPath: .spec.kafka.replicas
+ type: integer
+ - name: Desired ZK replicas
+ description: The desired number of ZooKeeper replicas in the cluster
+ jsonPath: .spec.zookeeper.replicas
+ type: integer
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ - name: Metadata State
+ description: The state of the cluster metadata
+ jsonPath: .status.kafkaMetadataState
+ type: string
+ - name: Warnings
+ description: Warnings related to the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Warning\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ kafka:
+ type: object
+ properties:
+ version:
+ type: string
+ description: The Kafka broker version. Defaults to the latest version. Consult the user documentation to understand the process required to upgrade or downgrade the version.
+ metadataVersion:
+ type: string
+ description: "The KRaft metadata version used by the Kafka cluster. This property is ignored when running in ZooKeeper mode. If the property is not set, it defaults to the metadata version that corresponds to the `version` property."
+ replicas:
+ type: integer
+ minimum: 1
+ description: The number of pods in the cluster. This property is required when node pools are not used.
+ image:
+ type: string
+ description: "The container image used for Kafka pods. If the property is not set, the default Kafka image version is determined based on the `version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration. Changing the Kafka image version does not automatically update the image versions for other components, such as Kafka Exporter. "
+ listeners:
+ type: array
+ minItems: 1
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ pattern: "^[a-z0-9]{1,11}$"
+ description: Name of the listener. The name will be used to identify the listener and the related Kubernetes objects. The name has to be unique within a given Kafka cluster. The name can consist of lowercase characters and numbers and be up to 11 characters long.
+ port:
+ type: integer
+ minimum: 9092
+ description: "Port number used by the listener inside Kafka. The port number has to be unique within a given Kafka cluster. Allowed port numbers are 9092 and higher with the exception of ports 9404 and 9999, which are already used for Prometheus and JMX. Depending on the listener type, the port number might not be the same as the port number that connects Kafka clients."
+ type:
+ type: string
+ enum:
+ - internal
+ - route
+ - loadbalancer
+ - nodeport
+ - ingress
+ - cluster-ip
+ description: "Type of the listener. The supported types are as follows: \n\n* `internal` type exposes Kafka internally only within the Kubernetes cluster.\n* `route` type uses OpenShift Routes to expose Kafka.\n* `loadbalancer` type uses LoadBalancer type services to expose Kafka.\n* `nodeport` type uses NodePort type services to expose Kafka.\n* `ingress` type uses Kubernetes Nginx Ingress to expose Kafka with TLS passthrough.\n* `cluster-ip` type uses a per-broker `ClusterIP` service.\n"
+ tls:
+ type: boolean
+ description: "Enables TLS encryption on the listener. This is a required property. For `route` and `ingress` type listeners, TLS encryption must be always enabled."
+ authentication:
+ type: object
+ properties:
+ accessTokenIsJwt:
+ type: boolean
+ description: Configure whether the access token is treated as JWT. This must be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
+ checkAccessTokenType:
+ type: boolean
+ description: Configure whether the access token type check is performed or not. This should be set to `false` if the authorization server does not include 'typ' claim in JWT token. Defaults to `true`.
+ checkAudience:
+ type: boolean
+ description: "Enable or disable audience checking. Audience checks identify the recipients of tokens. If audience checking is enabled, the OAuth Client ID also has to be configured using the `clientId` property. The Kafka broker will reject tokens that do not have its `clientId` in their `aud` (audience) claim. Default value is `false`."
+ checkIssuer:
+ type: boolean
+ description: Enable or disable issuer checking. By default issuer is checked using the value configured by `validIssuerUri`. Default value is `true`.
+ clientAudience:
+ type: string
+ description: The audience to use when making requests to the authorization server's token endpoint. Used for inter-broker authentication and for configuring OAuth 2.0 over PLAIN using the `clientId` and `secret` method.
+ clientId:
+ type: string
+ description: OAuth Client ID which the Kafka broker can use to authenticate against the authorization server and use the introspect endpoint URI.
+ clientScope:
+ type: string
+ description: The scope to use when making requests to the authorization server's token endpoint. Used for inter-broker authentication and for configuring OAuth 2.0 over PLAIN using the `clientId` and `secret` method.
+ clientSecret:
type: object
properties:
- name:
+ key:
type: string
- value:
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Configures pod-level security attributes and common container settings.
- terminationGracePeriodSeconds:
- type: integer
- minimum: 0
- description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
- affinity:
- type: object
- properties:
- nodeAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- preference:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka broker can use to authenticate against the authorization server and use the introspect endpoint URI.
+ connectTimeoutSeconds:
+ type: integer
+ description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
+ customClaimCheck:
+ type: string
+ description: JsonPath filter query to be applied to the JWT token or to the response of the introspection endpoint for additional token validation. Not set by default.
+ disableTlsHostnameVerification:
+ type: boolean
+ description: Enable or disable TLS hostname verification. Default value is `false`.
+ enableECDSA:
+ type: boolean
+ description: Enable or disable ECDSA support by installing BouncyCastle crypto provider. ECDSA support is always enabled. The BouncyCastle libraries are no longer packaged with Strimzi. Value is ignored.
+ enableMetrics:
+ type: boolean
+ description: Enable or disable OAuth metrics. Default value is `false`.
+ enableOauthBearer:
+ type: boolean
+ description: Enable or disable OAuth authentication over SASL_OAUTHBEARER. Default value is `true`.
+ enablePlain:
+ type: boolean
+ description: Enable or disable OAuth authentication over SASL_PLAIN. There is no re-authentication support when this mechanism is used. Default value is `false`.
+ failFast:
+ type: boolean
+ description: Enable or disable termination of Kafka broker processes due to potentially recoverable runtime errors during startup. Default value is `true`.
+ fallbackUserNameClaim:
+ type: string
+ description: The fallback username claim to be used for the user ID if the claim specified by `userNameClaim` is not present. This is useful when `client_credentials` authentication only results in the client ID being provided in another claim. It only takes effect if `userNameClaim` is set.
+ fallbackUserNamePrefix:
+ type: string
+ description: "The prefix to use with the value of `fallbackUserNameClaim` to construct the user id. This only takes effect if `fallbackUserNameClaim` is true, and the value is present for the claim. Mapping usernames and client ids into the same user id space is useful in preventing name collisions."
+ groupsClaim:
+ type: string
+ description: JsonPath query used to extract groups for the user during authentication. Extracted groups can be used by a custom authorizer. By default no groups are extracted.
+ groupsClaimDelimiter:
+ type: string
+ description: "A delimiter used to parse groups when they are extracted as a single String value rather than a JSON array. Default value is ',' (comma)."
+ httpRetries:
+ type: integer
+ description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
+ httpRetryPauseMs:
+ type: integer
+ description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
+ includeAcceptHeader:
+ type: boolean
+ description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
+ introspectionEndpointUri:
+ type: string
+ description: URI of the token introspection endpoint which can be used to validate opaque non-JWT tokens.
+ jwksEndpointUri:
+ type: string
+ description: "URI of the JWKS certificate endpoint, which can be used for local JWT validation."
+ jwksExpirySeconds:
+ type: integer
+ minimum: 1
+ description: Configures how often the JWKS certificates are considered valid. The expiry interval has to be at least 60 seconds longer than the refresh interval specified in `jwksRefreshSeconds`. Defaults to 360 seconds.
+ jwksIgnoreKeyUse:
+ type: boolean
+ description: Flag to ignore the 'use' attribute of `key` declarations in a JWKS endpoint response. Default value is `false`.
+ jwksMinRefreshPauseSeconds:
+ type: integer
+ minimum: 0
+ description: "The minimum pause between two consecutive refreshes. When an unknown signing key is encountered the refresh is scheduled immediately, but will always wait for this minimum pause. Defaults to 1 second."
+ jwksRefreshSeconds:
+ type: integer
+ minimum: 1
+ description: Configures how often the JWKS certificates are refreshed. The refresh interval has to be at least 60 seconds shorter than the expiry interval specified in `jwksExpirySeconds`. Defaults to 300 seconds.
+ listenerConfig:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: Configuration to be used for a specific listener. All values are prefixed with `listener.name.`.
+ maxSecondsWithoutReauthentication:
+ type: integer
+ description: "Maximum number of seconds the authenticated session remains valid without re-authentication. This enables Apache Kafka re-authentication feature, and causes sessions to expire when the access token expires. If the access token expires before max time or if max time is reached, the client has to re-authenticate, otherwise the server will drop the connection. Not set by default - the authenticated session does not expire when the access token expires. This option only applies to SASL_OAUTHBEARER authentication mechanism (when `enableOauthBearer` is `true`)."
+ readTimeoutSeconds:
+ type: integer
+ description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
+ sasl:
+ type: boolean
+ description: Enable or disable SASL on this listener.
+ secrets:
+ type: array
+ items:
type: object
properties:
- nodeSelectorTerms:
- type: array
- items:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- podAntiAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- description: The pod's affinity rules.
- tolerations:
- type: array
- items:
- type: object
- properties:
- effect:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Secrets to be mounted to `/opt/kafka/custom-authn-secrets/custom-listener--/`.
+ serverBearerTokenLocation:
type: string
- key:
+ description: Path to the file on the local filesystem that contains a bearer token to be used instead of client ID and secret when authenticating to authorization server.
+ tlsTrustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection to the OAuth server.
+ tokenEndpointUri:
type: string
- operator:
+ description: "URI of the Token Endpoint to use with SASL_PLAIN mechanism when the client authenticates with `clientId` and a `secret`. If set, the client can authenticate over SASL_PLAIN by either setting `username` to `clientId`, and setting `password` to client `secret`, or by setting `username` to account username, and `password` to access token prefixed with `$accessToken:`. If this option is not set, the `password` is always interpreted as an access token (without a prefix), and `username` as the account username (a so called 'no-client-credentials' mode)."
+ type:
type: string
- tolerationSeconds:
- type: integer
- value:
+ enum:
+ - tls
+ - scram-sha-512
+ - oauth
+ - custom
+ description: Authentication type. `oauth` type uses SASL OAUTHBEARER Authentication. `scram-sha-512` type uses SASL SCRAM-SHA-512 Authentication. `tls` type uses TLS Client Authentication. `tls` type is supported only on TLS listeners. `custom` type allows for any authentication type to be used.
+ userInfoEndpointUri:
type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- maxSkew:
- type: integer
- minDomains:
- type: integer
- nodeAffinityPolicy:
+ description: 'URI of the User Info Endpoint to use as a fallback to obtaining the user id when the Introspection Endpoint does not return information that can be used for the user id. '
+ userNameClaim:
type: string
- nodeTaintsPolicy:
+ description: "Name of the claim from the JWT authentication token, Introspection Endpoint response or User Info Endpoint response which will be used to extract the user id. Defaults to `sub`."
+ userNamePrefix:
type: string
- topologyKey:
+ description: "The prefix to use with the value of `userNameClaim` to construct the user ID. This only takes effect if `userNameClaim` is specified and the value is present for the claim. When used in combination with `fallbackUserNameClaims`, it ensures consistent mapping of usernames and client IDs into the same user ID space and prevents name collisions."
+ validIssuerUri:
type: string
- whenUnsatisfiable:
+ description: URI of the token issuer used for authentication.
+ validTokenType:
type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
- type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
+ description: "Valid value for the `token_type` attribute returned by the Introspection Endpoint. No default value, and not checked by default."
+ required:
+ - type
+ description: Authentication configuration for this listener.
+ configuration:
type: object
properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Kafka MirrorMaker `Pods`.
- podDisruptionBudget:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
- maxUnavailable:
- type: integer
- minimum: 0
- description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
- description: Template for Kafka MirrorMaker `PodDisruptionBudget`.
- mirrorMakerContainer:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
+ brokerCertChainAndKey:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the Secret.
+ key:
+ type: string
+ description: The name of the private key in the Secret.
+ required:
+ - secretName
+ - certificate
+ - key
+ description: Reference to the `Secret` which holds the certificate and private key pair which will be used for this listener. The certificate can optionally contain the whole chain. This field can be used only with listeners with enabled TLS encryption.
+ class:
type: string
- description: The environment variable key.
- value:
+ description: |-
+ Configures a specific class for `Ingress` and `LoadBalancer` that defines which controller is used. If not specified, the default controller is used.
+
+ * For an `ingress` listener, the operator uses this property to set the `ingressClassName` property in the `Ingress` resources.
+ * For a `loadbalancer` listener, the operator uses this property to set the `loadBalancerClass` property in the `Service` resources.
+
+ For `ingress` and `loadbalancer` listeners only.
+ externalTrafficPolicy:
type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
+ enum:
+ - Local
+ - Cluster
+ description: |-
+ Specifies whether the service routes external traffic to cluster-wide or node-local endpoints:
+
+ * `Cluster` may cause a second hop to another node and obscures the client source IP.
+ * `Local` avoids a second hop for `LoadBalancer` and `Nodeport` type services and preserves the client source IP (when supported by the infrastructure).
+
+ If unspecified, Kubernetes uses `Cluster` as the default. For `loadbalancer` or `nodeport` listeners only.
+ loadBalancerSourceRanges:
+ type: array
+ items:
type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
+ description: "A list of CIDR ranges (for example `10.0.0.0/8` or `130.211.204.1/32`) from which clients can connect to loadbalancer listeners. If supported by the platform, traffic through the loadbalancer is restricted to the specified CIDR ranges. This field is applicable only for loadbalancer type services and is ignored if the cloud provider does not support the feature. For `loadbalancer` listeners only."
+ bootstrap:
+ type: object
+ properties:
+ alternativeNames:
+ type: array
+ items:
+ type: string
+ description: Additional alternative names for the bootstrap service. The alternative names will be added to the list of subject alternative names of the TLS certificates.
+ host:
type: string
- drop:
- type: array
- items:
+ description: Specifies the hostname used for the bootstrap resource. For `route` (optional) or `ingress` (required) listeners only. Ensure the hostname resolves to the Ingress endpoints; no validation is performed by Strimzi.
+ nodePort:
+ type: integer
+ description: Node port for the bootstrap service. For `nodeport` listeners only.
+ loadBalancerIP:
type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
+ description: The loadbalancer is requested with the IP address specified in this property. This feature depends on whether the underlying cloud provider supports specifying the `loadBalancerIP` when a load balancer is created. This property is ignored if the cloud provider does not support the feature. For `loadbalancer` listeners only.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: "Annotations added to `Ingress`, `Route`, or `Service` resources. You can use this property to configure DNS providers such as External DNS. For `loadbalancer`, `nodeport`, `route`, or `ingress` listeners only."
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: "Labels added to `Ingress`, `Route`, or `Service` resources. For `loadbalancer`, `nodeport`, `route`, or `ingress` listeners only."
+ externalIPs:
+ type: array
+ items:
+ type: string
+ description: External IPs associated to the nodeport service. These IPs are used by clients external to the Kubernetes cluster to access the Kafka brokers. This property is helpful when `nodeport` without `externalIP` is not sufficient. For example on bare-metal Kubernetes clusters that do not support Loadbalancer service types. For `nodeport` listeners only.
+ description: Bootstrap configuration.
+ brokers:
+ type: array
+ items:
+ type: object
+ properties:
+ broker:
+ type: integer
+ description: ID of the kafka broker (broker identifier). Broker IDs start from 0 and correspond to the number of broker replicas.
+ advertisedHost:
+ type: string
+ description: The host name used in the brokers' `advertised.listeners`.
+ advertisedPort:
+ type: integer
+ description: The port number used in the brokers' `advertised.listeners`.
+ host:
+ type: string
+ description: The broker host. This field will be used in the Ingress resource or in the Route resource to specify the desired hostname. This field can be used only with `route` (optional) or `ingress` (required) type listeners.
+ nodePort:
+ type: integer
+ description: Node port for the per-broker service. This field can be used only with `nodeport` type listener.
+ loadBalancerIP:
+ type: string
+ description: The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the `loadBalancerIP` when a load balancer is created. This field is ignored if the cloud provider does not support the feature. This field can be used only with `loadbalancer` type listener.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: "Annotations that will be added to the `Ingress` or `Service` resource. You can use this field to configure DNS providers such as External DNS. This field can be used only with `loadbalancer`, `nodeport`, or `ingress` type listeners."
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: "Labels that will be added to the `Ingress`, `Route`, or `Service` resource. This field can be used only with `loadbalancer`, `nodeport`, `route`, or `ingress` type listeners."
+ externalIPs:
+ type: array
+ items:
+ type: string
+ description: External IPs associated to the nodeport service. These IPs are used by clients external to the Kubernetes cluster to access the Kafka brokers. This field is helpful when `nodeport` without `externalIP` is not sufficient. For example on bare-metal Kubernetes clusters that do not support Loadbalancer service types. This field can only be used with `nodeport` type listener.
+ required:
+ - broker
+ description: Per-broker configurations.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: |-
+ Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`:
+
+ * `SingleStack` is for a single IP family.
+ * `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters.
+ * `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters.
+
+ If unspecified, Kubernetes will choose the default value based on the service type.
+ ipFamilies:
+ type: array
+ items:
type: string
- type:
+ enum:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ createBootstrapService:
+ type: boolean
+ description: Whether to create the bootstrap service or not. The bootstrap service is created by default (if not specified differently). This field can be used with the `loadbalancer` listener.
+ finalizers:
+ type: array
+ items:
type: string
- windowsOptions:
+ description: "A list of finalizers configured for the `LoadBalancer` type services created for this listener. If supported by the platform, the finalizer `service.kubernetes.io/load-balancer-cleanup` to make sure that the external load balancer is deleted together with the service.For more information, see https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#garbage-collecting-load-balancers. For `loadbalancer` listeners only."
+ useServiceDnsDomain:
+ type: boolean
+ description: |-
+ Configures whether the Kubernetes service DNS domain should be included in the generated addresses.
+
+ * If set to `false`, the generated addresses do not contain the service DNS domain suffix. For example, `my-cluster-kafka-0.my-cluster-kafka-brokers.myproject.svc`.
+ * If set to `true`, the generated addresses contain the service DNS domain suffix. For example, `my-cluster-kafka-0.my-cluster-kafka-brokers.myproject.svc.cluster.local`.
+
+ The default is `.cluster.local`, but this is customizable using the environment variable `KUBERNETES_SERVICE_DNS_DOMAIN`. For `internal` and `cluster-ip` listeners only.
+ maxConnections:
+ type: integer
+ description: The maximum number of connections we allow for this listener in the broker at any time. New connections are blocked if the limit is reached.
+ maxConnectionCreationRate:
+ type: integer
+ description: The maximum connection creation rate we allow in this listener at any time. New connections will be throttled if the limit is reached.
+ preferredNodePortAddressType:
+ type: string
+ enum:
+ - ExternalIP
+ - ExternalDNS
+ - InternalIP
+ - InternalDNS
+ - Hostname
+ description: |-
+ Defines which address type should be used as the node address. Available types are: `ExternalDNS`, `ExternalIP`, `InternalDNS`, `InternalIP` and `Hostname`. By default, the addresses are used in the following order (the first one found is used):
+
+ * `ExternalDNS`
+ * `ExternalIP`
+ * `InternalDNS`
+ * `InternalIP`
+ * `Hostname`
+
+ This property is used to select the preferred address type, which is checked first. If no address is found for this address type, the other types are checked in the default order. For `nodeport` listeners only.
+ publishNotReadyAddresses:
+ type: boolean
+ description: Configures whether the service endpoints are considered "ready" even if the Pods themselves are not. Defaults to `false`. This field cannot be used with `internal` listeners.
+ description: Additional listener configuration.
+ networkPolicyPeers:
+ type: array
+ items:
type: object
properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for Kafka MirrorMaker container.
- serviceAccount:
+ ipBlock:
+ type: object
+ properties:
+ cidr:
+ type: string
+ except:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ podSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ description: "List of peers which should be able to connect to this listener. Peers in this list are combined using a logical OR operation. If this field is empty or missing, all connections will be allowed for this listener. If this field is present and contains at least one item, the listener only allows the traffic which matches at least one item in this list."
+ required:
+ - name
+ - port
+ - type
+ - tls
+ description: Configures listeners to provide access to Kafka brokers.
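For orientation only (not part of the manifest above): a minimal sketch of how the listener fields documented in this schema might be combined in a `Kafka` custom resource from this CRD bundle. The `spec.kafka.listeners` path and the `configuration` key follow the usual Strimzi layout, and all concrete values (cluster name, node ports, hosts, the `kafka-clients` label) are illustrative placeholders.

  apiVersion: kafka.strimzi.io/v1beta2
  kind: Kafka
  metadata:
    name: my-cluster                       # placeholder cluster name
  spec:
    kafka:
      # replicas, storage, and other broker settings omitted for brevity
      listeners:
        - name: external                   # name, port, type, and tls are required
          port: 9094
          type: nodeport
          tls: false
          configuration:
            bootstrap:
              nodePort: 32100
              externalIPs:
                - 192.0.2.10               # example external IP for the bootstrap nodeport service
            brokers:
              - broker: 0                  # per-broker override; `broker` is the only required key
                nodePort: 32000
                advertisedHost: node-0.example.internal
            preferredNodePortAddressType: InternalDNS
          networkPolicyPeers:
            - namespaceSelector:
                matchLabels:
                  app: kafka-clients       # only traffic matching this peer is allowed to the listener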
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "Kafka broker config properties with the following prefixes cannot be set: listeners, advertised., broker., listener., host.name, port, inter.broker.listener.name, sasl., ssl., security., password., log.dir, zookeeper.connect, zookeeper.set.acl, zookeeper.ssl, zookeeper.clientCnxnSocket, authorizer., super.user, cruise.control.metrics.topic, cruise.control.metrics.reporter.bootstrap.servers, node.id, process.roles, controller., metadata.log.dir, zookeeper.metadata.migration.enable, client.quota.callback.static.kafka.admin., client.quota.callback.static.produce, client.quota.callback.static.fetch, client.quota.callback.static.storage.per.volume.limit.min.available., client.quota.callback.static.excluded.principal.name.list (with the exception of: zookeeper.connection.timeout.ms, sasl.server.max.receive.size, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols, ssl.secure.random.implementation, cruise.control.metrics.topic.num.partitions, cruise.control.metrics.topic.replication.factor, cruise.control.metrics.topic.retention.ms, cruise.control.metrics.topic.auto.create.retries, cruise.control.metrics.topic.auto.create.timeout.ms, cruise.control.metrics.topic.min.insync.replicas, controller.quorum.election.backoff.max.ms, controller.quorum.election.timeout.ms, controller.quorum.fetch.timeout.ms)."
+ storage:
type: object
properties:
- metadata:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation.
+ deleteClaim:
+ type: boolean
+ description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
+ id:
+ type: integer
+ minimum: 0
+ description: Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'.
+ kraftMetadata:
+ type: string
+ enum:
+ - shared
+ description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
+ overrides:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation for this broker.
+ broker:
+ type: integer
+ description: ID of the Kafka broker (broker identifier).
+ description: Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers.
+ selector:
+ additionalProperties:
+ type: string
type: object
- properties:
- labels:
- additionalProperties:
+ description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
+ size:
+ type: string
+ description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
+ sizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ - jbod
+ description: "Storage type, must be either 'ephemeral', 'persistent-claim', or 'jbod'."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ description: The storage class to use for dynamic volume allocation.
+ deleteClaim:
+ type: boolean
+ description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
+ id:
+ type: integer
+ minimum: 0
+ description: Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration.
+ kraftMetadata:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Kafka MirrorMaker service account.
- description: "Template to specify how Kafka MirrorMaker resources, `Deployments` and `Pods`, are generated."
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- oneOf:
- - properties:
- include: {}
- required:
- - include
- - properties:
- whitelist: {}
- required:
- - whitelist
- required:
- - replicas
- - consumer
- - producer
- description: The specification of Kafka MirrorMaker.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- labelSelector:
- type: string
- description: Label selector for pods providing this resource.
- replicas:
- type: integer
- description: The current number of pods being used to provide this resource.
- description: The status of Kafka MirrorMaker.
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: kafkaconnectors.kafka.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: kafka.strimzi.io
- names:
- kind: KafkaConnector
- listKind: KafkaConnectorList
- singular: kafkaconnector
- plural: kafkaconnectors
- shortNames:
- - kctr
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- scale:
- specReplicasPath: .spec.tasksMax
- statusReplicasPath: .status.tasksMax
- additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka Connect cluster this connector belongs to
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: Connector class
- description: The class used by this connector
- jsonPath: .spec.class
- type: string
- - name: Max Tasks
- description: Maximum number of tasks
- jsonPath: .spec.tasksMax
- type: integer
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- class:
- type: string
- description: The Class for the Kafka Connector.
- tasksMax:
- type: integer
- minimum: 1
- description: The maximum number of tasks for the Kafka Connector.
- autoRestart:
- type: object
- properties:
- enabled:
- type: boolean
- description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
- maxRestarts:
- type: integer
- description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
- description: Automatic restart of connector and tasks configuration.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
- pause:
- type: boolean
- description: Whether the connector should be paused. Defaults to false.
- state:
- type: string
- enum:
- - paused
- - stopped
- - running
- description: The state the connector should be in. Defaults to running.
- description: The specification of the Kafka Connector.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- autoRestart:
- type: object
- properties:
- count:
- type: integer
- description: The number of times the connector or task is restarted.
- connectorName:
- type: string
- description: The name of the connector being restarted.
- lastRestartTimestamp:
- type: string
- description: The last time the automatic restart was attempted. The required format is 'yyyy-MM-ddTHH:mm:ssZ' in the UTC time zone.
- description: The auto restart status.
- connectorStatus:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The connector status, as reported by the Kafka Connect REST API."
- tasksMax:
- type: integer
- description: The maximum number of tasks for the Kafka Connector.
- topics:
- type: array
- items:
- type: string
- description: The list of topics used by the Kafka Connector.
- description: The status of the Kafka Connector.
-
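For reference, a minimal `KafkaConnector` resource matching the schema above. The `strimzi.io/cluster` label is the one the printer columns above select on; the built-in `FileStreamSourceConnector` is used purely as an illustration, and the connector, cluster, and topic names are placeholders.

  apiVersion: kafka.strimzi.io/v1beta2
  kind: KafkaConnector
  metadata:
    name: my-source-connector
    labels:
      strimzi.io/cluster: my-connect     # selects the Kafka Connect cluster to run on
  spec:
    class: org.apache.kafka.connect.file.FileStreamSourceConnector
    tasksMax: 1
    state: running                       # one of paused, stopped, running
    autoRestart:
      enabled: true                      # operator retries failed connectors and tasks
    config:                              # connector options; name, connector.class, tasks.max cannot be set here
      file: /opt/kafka/LICENSE
      topic: my-topic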
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: strimzi-kafka-broker
- labels:
- app: strimzi
-rules:
- - apiGroups:
- - ""
- resources:
- # The Kafka Brokers require "get" permissions to view the node they are on
- # This information is used to generate a Rack ID that is used for High Availability configurations
- - nodes
- verbs:
- - get
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: strimzi-cluster-operator-global
- labels:
- app: strimzi
-rules:
- - apiGroups:
- - "rbac.authorization.k8s.io"
- resources:
- # The cluster operator needs to create and manage cluster role bindings in the case of an install where a user
- # has specified they want their cluster role bindings generated
- - clusterrolebindings
- verbs:
- - get
- - list
- - watch
- - create
- - delete
- - patch
- - update
- - apiGroups:
- - storage.k8s.io
- resources:
- # The cluster operator requires "get" permissions to view storage class details
- # This is because only a persistent volume of a supported storage class type can be resized
- - storageclasses
- verbs:
- - get
- - apiGroups:
- - ""
- resources:
- # The cluster operator requires "list" permissions to view all nodes in a cluster
- # The listing is used to determine the node addresses when NodePort access is configured
- # These addresses are then exposed in the custom resource states
- - nodes
- verbs:
- - list
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: kafkatopics.kafka.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: kafka.strimzi.io
- names:
- kind: KafkaTopic
- listKind: KafkaTopicList
- singular: kafkatopic
- plural: kafkatopics
- shortNames:
- - kt
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka cluster this topic belongs to
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: Partitions
- description: The desired number of partitions in the topic
- jsonPath: .spec.partitions
- type: integer
- - name: Replication factor
- description: The desired number of replicas of each partition
- jsonPath: .spec.replicas
- type: integer
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- topicName:
- type: string
- description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name.
- partitions:
- type: integer
- minimum: 1
- description: "The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`."
- replicas:
- type: integer
- minimum: 1
- maximum: 32767
- description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: The topic configuration.
- description: The specification of the topic.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- topicName:
- type: string
- description: Topic name.
- topicId:
- type: string
- description: "The topic's id. For a KafkaTopic with the ready condition, this will change only if the topic gets deleted and recreated with the same name."
- replicasChange:
- type: object
- properties:
- targetReplicas:
- type: integer
- description: The target replicas value requested by the user. This may be different from .spec.replicas when a change is ongoing.
- state:
- type: string
- enum:
- - pending
- - ongoing
- description: "Current state of the replicas change operation. This can be `pending`, when the change has been requested, or `ongoing`, when the change has been successfully submitted to Cruise Control."
- message:
- type: string
- description: Message for the user related to the replicas change request. This may contain transient error messages that would disappear on periodic reconciliations.
- sessionId:
- type: string
- description: The session identifier for replicas change requests pertaining to this KafkaTopic resource. This is used by the Topic Operator to track the status of `ongoing` replicas change operations.
- description: Replication factor change status.
- description: The status of the topic.
- - name: v1beta1
- served: true
- storage: false
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka cluster this topic belongs to
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: Partitions
- description: The desired number of partitions in the topic
- jsonPath: .spec.partitions
- type: integer
- - name: Replication factor
- description: The desired number of replicas of each partition
- jsonPath: .spec.replicas
- type: integer
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- topicName:
- type: string
- description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name.
- partitions:
- type: integer
- minimum: 1
- description: "The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`."
- replicas:
- type: integer
- minimum: 1
- maximum: 32767
- description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: The topic configuration.
- description: The specification of the topic.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- topicName:
- type: string
- description: Topic name.
- topicId:
- type: string
- description: "The topic's id. For a KafkaTopic with the ready condition, this will change only if the topic gets deleted and recreated with the same name."
- replicasChange:
- type: object
- properties:
- targetReplicas:
- type: integer
- description: The target replicas value requested by the user. This may be different from .spec.replicas when a change is ongoing.
- state:
- type: string
- enum:
- - pending
- - ongoing
- description: "Current state of the replicas change operation. This can be `pending`, when the change has been requested, or `ongoing`, when the change has been successfully submitted to Cruise Control."
- message:
- type: string
- description: Message for the user related to the replicas change request. This may contain transient error messages that would disappear on periodic reconciliations.
- sessionId:
- type: string
- description: The session identifier for replicas change requests pertaining to this KafkaTopic resource. This is used by the Topic Operator to track the status of `ongoing` replicas change operations.
- description: Replication factor change status.
- description: The status of the topic.
- - name: v1alpha1
- served: true
- storage: false
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka cluster this topic belongs to
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: Partitions
- description: The desired number of partitions in the topic
- jsonPath: .spec.partitions
- type: integer
- - name: Replication factor
- description: The desired number of replicas of each partition
- jsonPath: .spec.replicas
- type: integer
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- topicName:
- type: string
- description: The name of the topic. When absent this will default to the metadata.name of the topic. It is recommended to not set this unless the topic name is not a valid Kubernetes resource name.
- partitions:
- type: integer
- minimum: 1
- description: "The number of partitions the topic should have. This cannot be decreased after topic creation. It can be increased after topic creation, but it is important to understand the consequences that has, especially for topics with semantic partitioning. When absent this will default to the broker configuration for `num.partitions`."
- replicas:
- type: integer
- minimum: 1
- maximum: 32767
- description: The number of replicas the topic should have. When absent this will default to the broker configuration for `default.replication.factor`.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: The topic configuration.
- description: The specification of the topic.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- topicName:
- type: string
- description: Topic name.
- topicId:
- type: string
- description: "The topic's id. For a KafkaTopic with the ready condition, this will change only if the topic gets deleted and recreated with the same name."
- replicasChange:
- type: object
- properties:
- targetReplicas:
- type: integer
- description: The target replicas value requested by the user. This may be different from .spec.replicas when a change is ongoing.
- state:
- type: string
- enum:
- - pending
- - ongoing
- description: "Current state of the replicas change operation. This can be `pending`, when the change has been requested, or `ongoing`, when the change has been successfully submitted to Cruise Control."
- message:
- type: string
- description: Message for the user related to the replicas change request. This may contain transient error messages that would disappear on periodic reconciliations.
- sessionId:
- type: string
- description: The session identifier for replicas change requests pertaining to this KafkaTopic resource. This is used by the Topic Operator to track the status of `ongoing` replicas change operations.
- description: Replication factor change status.
- description: The status of the topic.
-
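For reference, a minimal `KafkaTopic` resource corresponding to the schema above. The `strimzi.io/cluster` label matches the printer-column selector shown earlier; the topic name, cluster name, and config values are placeholders.

  apiVersion: kafka.strimzi.io/v1beta2
  kind: KafkaTopic
  metadata:
    name: my-topic
    labels:
      strimzi.io/cluster: my-cluster   # ties the topic to a Kafka cluster managed by the operator
  spec:
    partitions: 3                      # cannot be decreased after creation
    replicas: 3
    config:                            # standard Kafka topic-level configuration
      retention.ms: 604800000          # 7 days
      min.insync.replicas: 2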
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: strimzi-cluster-operator
- labels:
- app: strimzi
-data:
- log4j2.properties: |
- name = COConfig
- monitorInterval = 30
-
- appender.console.type = Console
- appender.console.name = STDOUT
- appender.console.layout.type = PatternLayout
- appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
-
- rootLogger.level = ${env:STRIMZI_LOG_LEVEL:-INFO}
- rootLogger.appenderRefs = stdout
- rootLogger.appenderRef.console.ref = STDOUT
-
- # Kafka AdminClient logging is a bit noisy at INFO level
- logger.kafka.name = org.apache.kafka
- logger.kafka.level = WARN
-
- # Zookeeper is very verbose even on INFO level -> We set it to WARN by default
- logger.zookeepertrustmanager.name = org.apache.zookeeper
- logger.zookeepertrustmanager.level = WARN
-
- # Keeps separate level for Netty logging -> to not be changed by the root logger
- logger.netty.name = io.netty
- logger.netty.level = INFO
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: strimzi-cluster-operator
- labels:
- app: strimzi
-subjects:
- - kind: ServiceAccount
- name: strimzi-cluster-operator
- namespace: myproject
-roleRef:
- kind: ClusterRole
- name: strimzi-cluster-operator-global
- apiGroup: rbac.authorization.k8s.io
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: kafkaconnects.kafka.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: kafka.strimzi.io
- names:
- kind: KafkaConnect
- listKind: KafkaConnectList
- singular: kafkaconnect
- plural: kafkaconnects
- shortNames:
- - kc
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- scale:
- specReplicasPath: .spec.replicas
- statusReplicasPath: .status.replicas
- labelSelectorPath: .status.labelSelector
- additionalPrinterColumns:
- - name: Desired replicas
- description: The desired number of Kafka Connect replicas
- jsonPath: .spec.replicas
- type: integer
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- version:
- type: string
- description: The Kafka Connect version. Defaults to the latest version. Consult the user documentation to understand the process required to upgrade or downgrade the version.
- replicas:
- type: integer
- description: The number of pods in the Kafka Connect group. Defaults to `3`.
- image:
- type: string
- description: "The container image used for Kafka Connect pods. If no image name is explicitly specified, it is determined based on the `spec.version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
- bootstrapServers:
- type: string
- description: Bootstrap servers to connect to. This should be given as a comma-separated list of <hostname>:<port> pairs.
- tls:
- type: object
- properties:
- trustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection.
- description: TLS configuration.
- authentication:
- type: object
- properties:
- accessToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
- accessTokenIsJwt:
- type: boolean
- description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
- audience:
- type: string
- description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
- certificateAndKey:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the Secret.
- key:
- type: string
- description: The name of the private key in the Secret.
- required:
- - secretName
- - certificate
- - key
- description: Reference to the `Secret` which holds the certificate and private key pair.
- clientId:
- type: string
- description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- clientSecret:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- connectTimeoutSeconds:
- type: integer
- description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
- disableTlsHostnameVerification:
- type: boolean
- description: Enable or disable TLS hostname verification. Default value is `false`.
- enableMetrics:
- type: boolean
- description: Enable or disable OAuth metrics. Default value is `false`.
- httpRetries:
- type: integer
- description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
- httpRetryPauseMs:
- type: integer
- description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
- includeAcceptHeader:
- type: boolean
- description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
- maxTokenExpirySeconds:
- type: integer
- description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
- passwordSecret:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the password.
- password:
- type: string
- description: The name of the key in the Secret under which the password is stored.
- required:
- - secretName
- - password
- description: Reference to the `Secret` which holds the password.
- readTimeoutSeconds:
- type: integer
- description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
- refreshToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
- scope:
- type: string
- description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
- tlsTrustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection to the OAuth server.
- tokenEndpointUri:
- type: string
- description: Authorization server token endpoint URI.
- type:
- type: string
- enum:
- - tls
- - scram-sha-256
- - scram-sha-512
- - plain
- - oauth
- description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
- username:
- type: string
- description: Username used for the authentication.
- required:
- - type
- description: Authentication configuration for Kafka Connect.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Kafka Connect configuration. Properties with the following prefixes cannot be set: ssl., sasl., security., listeners, plugin.path, rest., bootstrap.servers, consumer.interceptor.classes, producer.interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
- resources:
- type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: The maximum limits for CPU and memory resources and the requested initial resources.
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- jvmOptions:
- type: object
- properties:
- "-XX":
- additionalProperties:
- type: string
- type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to the JVM.
- "-Xms":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to the JVM.
- gcLoggingEnabled:
- type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The system property name.
- value:
- type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for pods.
- jmxOptions:
- type: object
- properties:
- authentication:
- type: object
- properties:
- type:
- type: string
- enum:
- - password
- description: Authentication type. Currently the only supported type is `password`. The `password` type creates a username and protected port with no TLS.
- required:
- - type
- description: Authentication configuration for connecting to the JMX port.
- description: JMX Options.
- logging:
- type: object
- properties:
- loggers:
- additionalProperties:
- type: string
- type: object
- description: A Map from logger name to logger level.
- type:
- type: string
- enum:
- - inline
- - external
- description: "Logging type, must be either 'inline' or 'external'."
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: '`ConfigMap` entry where the logging configuration is stored. '
- required:
- - type
- description: Logging configuration for Kafka Connect.
- clientRackInitImage:
- type: string
- description: The image of the init container used for initializing the `client.rack`.
- rack:
- type: object
- properties:
- topologyKey:
- type: string
- example: topology.kubernetes.io/zone
- description: "A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set a broker's `broker.rack` config, and the `client.rack` config for Kafka Connect or MirrorMaker 2."
- required:
- - topologyKey
- description: Configuration of the node label which will be used as the `client.rack` consumer configuration.
- metricsConfig:
- type: object
- properties:
- type:
- type: string
- enum:
- - jmxPrometheusExporter
- description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
- required:
- - type
- - valueFrom
- description: Metrics configuration.
- tracing:
- type: object
- properties:
- type:
- type: string
- enum:
- - jaeger
- - opentelemetry
- description: "Type of the tracing used. Currently the only supported type is `opentelemetry` for OpenTelemetry tracing. As of Strimzi 0.37.0, `jaeger` type is not supported anymore and this option is ignored."
- required:
- - type
- description: The configuration of tracing in Kafka Connect.
- template:
- type: object
- properties:
- deployment:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- deploymentStrategy:
- type: string
- enum:
- - RollingUpdate
- - Recreate
- description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
- description: Template for Kafka Connect `Deployment`.
- podSet:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka Connect `StrimziPodSet` resource.
- pod:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- imagePullSecrets:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
- securityContext:
- type: object
- properties:
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- fsGroup:
- type: integer
- fsGroupChangePolicy:
- type: string
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- supplementalGroups:
- type: array
- items:
- type: integer
- sysctls:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- value:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Configures pod-level security attributes and common container settings.
- terminationGracePeriodSeconds:
- type: integer
- minimum: 0
- description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
- affinity:
- type: object
- properties:
- nodeAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- preference:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: object
- properties:
- nodeSelectorTerms:
- type: array
- items:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- podAntiAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- description: The pod's affinity rules.
- tolerations:
- type: array
- items:
- type: object
- properties:
- effect:
- type: string
- key:
- type: string
- operator:
- type: string
- tolerationSeconds:
- type: integer
- value:
- type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- maxSkew:
- type: integer
- minDomains:
- type: integer
- nodeAffinityPolicy:
- type: string
- nodeTaintsPolicy:
- type: string
- topologyKey:
- type: string
- whenUnsatisfiable:
- type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
- type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
- type: object
- properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Kafka Connect `Pods`.
- apiService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- ipFamilyPolicy:
- type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
- type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for Kafka Connect API `Service`.
- headlessService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- ipFamilyPolicy:
- type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
- type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for Kafka Connect headless `Service`.
- connectContainer:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Kafka Connect container.
- initContainer:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Kafka init container.
- podDisruptionBudget:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
- maxUnavailable:
- type: integer
- minimum: 0
- description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
- description: Template for Kafka Connect `PodDisruptionBudget`.
- serviceAccount:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Kafka Connect service account.
- clusterRoleBinding:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Kafka Connect ClusterRoleBinding.
- buildPod:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- imagePullSecrets:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
- securityContext:
- type: object
- properties:
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- fsGroup:
- type: integer
- fsGroupChangePolicy:
- type: string
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- supplementalGroups:
- type: array
- items:
- type: integer
- sysctls:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- value:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Configures pod-level security attributes and common container settings.
- terminationGracePeriodSeconds:
- type: integer
- minimum: 0
- description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
- affinity:
- type: object
- properties:
- nodeAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- preference:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: object
- properties:
- nodeSelectorTerms:
- type: array
- items:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- podAntiAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- description: The pod's affinity rules.
- tolerations:
- type: array
- items:
- type: object
- properties:
- effect:
- type: string
- key:
- type: string
- operator:
- type: string
- tolerationSeconds:
- type: integer
- value:
- type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- maxSkew:
- type: integer
- minDomains:
- type: integer
- nodeAffinityPolicy:
- type: string
- nodeTaintsPolicy:
- type: string
- topologyKey:
- type: string
- whenUnsatisfiable:
- type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
- type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
- type: object
- properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Kafka Connect Build `Pods`. The build pod is used only on Kubernetes.
- buildContainer:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Kafka Connect Build container. The build container is used only on Kubernetes.
- buildConfig:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- pullSecret:
- type: string
- description: Container Registry Secret with the credentials for pulling the base image.
- description: Template for the Kafka Connect BuildConfig used to build new container images. The BuildConfig is used only on OpenShift.
- buildServiceAccount:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Kafka Connect Build service account.
- jmxSecret:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Secret of the Kafka Connect Cluster JMX authentication.
- description: "Template for Kafka Connect and Kafka Mirror Maker 2 resources. The template allows users to specify how the `Pods`, `Service`, and other services are generated."
- externalConfiguration:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: Name of the environment variable which will be passed to the Kafka Connect pods. The name of the environment variable cannot start with `KAFKA_` or `STRIMZI_`.
- valueFrom:
- type: object
- properties:
- secretKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to a key in a Secret.
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to a key in a ConfigMap.
- description: Value of the environment variable which will be passed to the Kafka Connect pods. It can be passed either as a reference to Secret or ConfigMap field. The field has to specify exactly one Secret or ConfigMap.
- required:
- - name
- - valueFrom
- description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as environment variables.
- volumes:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: Name of the volume which will be added to the Kafka Connect pods.
- secret:
- type: object
- properties:
- defaultMode:
- type: integer
- items:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- mode:
- type: integer
- path:
- type: string
- optional:
- type: boolean
- secretName:
- type: string
- description: Reference to a key in a Secret. Exactly one Secret or ConfigMap has to be specified.
- configMap:
- type: object
- properties:
- defaultMode:
- type: integer
- items:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- mode:
- type: integer
- path:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to a key in a ConfigMap. Exactly one Secret or ConfigMap has to be specified.
- required:
- - name
- description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as volumes.
- description: Pass data from Secrets or ConfigMaps to the Kafka Connect pods and use them to configure connectors.
- build:
- type: object
- properties:
- output:
- type: object
- properties:
- additionalKanikoOptions:
- type: array
- items:
- type: string
- description: "Configures additional options which will be passed to the Kaniko executor when building the new Connect image. Allowed options are: --customPlatform, --insecure, --insecure-pull, --insecure-registry, --log-format, --log-timestamp, --registry-mirror, --reproducible, --single-snapshot, --skip-tls-verify, --skip-tls-verify-pull, --skip-tls-verify-registry, --verbosity, --snapshotMode, --use-new-run. These options will be used only on Kubernetes where the Kaniko executor is used. They will be ignored on OpenShift. The options are described in the link:https://github.com/GoogleContainerTools/kaniko[Kaniko GitHub repository^]. Changing this field does not trigger new build of the Kafka Connect image."
- image:
- type: string
- description: The name of the image which will be built. Required.
- pushSecret:
- type: string
- description: Container Registry Secret with the credentials for pushing the newly built image.
- type:
- type: string
- enum:
- - docker
- - imagestream
- description: Output type. Must be either `docker` for pushing the newly built image to a Docker-compatible registry or `imagestream` for pushing the image to OpenShift ImageStream. Required.
- required:
- - image
- - type
- description: Configures where the newly built image should be stored. Required.
- plugins:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- pattern: "^[a-z0-9][-_a-z0-9]*[a-z0-9]$"
- description: "The unique name of the connector plugin. Will be used to generate the path where the connector artifacts will be stored. The name has to be unique within the KafkaConnect resource. The name has to follow the following pattern: `^[a-z][-_a-z0-9]*[a-z]$`. Required."
- artifacts:
- type: array
- items:
- type: object
- properties:
- artifact:
- type: string
- description: Maven artifact id. Applicable to the `maven` artifact type only.
- fileName:
- type: string
- description: Name under which the artifact will be stored.
- group:
- type: string
- description: Maven group id. Applicable to the `maven` artifact type only.
- insecure:
- type: boolean
- description: "By default, connections using TLS are verified to check they are secure. The server certificate used must be valid, trusted, and contain the server name. By setting this option to `true`, all TLS verification is disabled and the artifact will be downloaded, even when the server is considered insecure."
- repository:
- type: string
- description: Maven repository to download the artifact from. Applicable to the `maven` artifact type only.
- sha512sum:
- type: string
- description: "SHA512 checksum of the artifact. Optional. If specified, the checksum will be verified while building the new container. If not specified, the downloaded artifact will not be verified. Not applicable to the `maven` artifact type. "
- type:
- type: string
- enum:
- - jar
- - tgz
- - zip
- - maven
- - other
- description: "Artifact type. Currently, the supported artifact types are `tgz`, `jar`, `zip`, `other` and `maven`."
- url:
- type: string
- pattern: "^(https?|ftp)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$"
- description: "URL of the artifact which will be downloaded. Strimzi does not do any security scanning of the downloaded artifacts. For security reasons, you should first verify the artifacts manually and configure the checksum verification to make sure the same artifact is used in the automated build. Required for `jar`, `zip`, `tgz` and `other` artifacts. Not applicable to the `maven` artifact type."
- version:
- type: string
- description: Maven version number. Applicable to the `maven` artifact type only.
- required:
- - type
- description: List of artifacts which belong to this connector plugin. Required.
- required:
- - name
- - artifacts
- description: List of connector plugins which should be added to the Kafka Connect. Required.
- resources:
- type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve for the build.
- required:
- - output
- - plugins
- description: Configures how the Connect container image should be built. Optional.
- required:
- - bootstrapServers
- description: The specification of the Kafka Connect cluster.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- url:
- type: string
- description: The URL of the REST API endpoint for managing and monitoring Kafka Connect connectors.
- connectorPlugins:
- type: array
- items:
- type: object
- properties:
- class:
- type: string
- description: The class of the connector plugin.
- type:
- type: string
- description: The type of the connector plugin. The available types are `sink` and `source`.
- version:
- type: string
- description: The version of the connector plugin.
- description: The list of connector plugins available in this Kafka Connect deployment.
- replicas:
- type: integer
- description: The current number of pods being used to provide this resource.
- labelSelector:
- type: string
- description: Label selector for pods providing this resource.
- description: The status of the Kafka Connect cluster.
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: kafkabridges.kafka.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: kafka.strimzi.io
- names:
- kind: KafkaBridge
- listKind: KafkaBridgeList
- singular: kafkabridge
- plural: kafkabridges
- shortNames:
- - kb
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- scale:
- specReplicasPath: .spec.replicas
- statusReplicasPath: .status.replicas
- labelSelectorPath: .status.labelSelector
- additionalPrinterColumns:
- - name: Desired replicas
- description: The desired number of Kafka Bridge replicas
- jsonPath: .spec.replicas
- type: integer
- - name: Bootstrap Servers
- description: The bootstrap servers
- jsonPath: .spec.bootstrapServers
- type: string
- priority: 1
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- replicas:
- type: integer
- minimum: 0
- description: The number of pods in the `Deployment`. Defaults to `1`.
- image:
- type: string
- description: "The container image used for Kafka Bridge pods. If no image name is explicitly specified, the image name corresponds to the image specified in the Cluster Operator configuration. If an image name is not defined in the Cluster Operator configuration, a default value is used."
- bootstrapServers:
- type: string
- description: A list of host:port pairs for establishing the initial connection to the Kafka cluster.
- tls:
- type: object
- properties:
- trustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection.
- description: TLS configuration for connecting Kafka Bridge to the cluster.
- authentication:
- type: object
- properties:
- accessToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
- accessTokenIsJwt:
- type: boolean
- description: Configure whether the access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
- audience:
- type: string
- description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
- certificateAndKey:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the Secret.
- key:
- type: string
- description: The name of the private key in the Secret.
- required:
- - secretName
- - certificate
- - key
- description: Reference to the `Secret` which holds the certificate and private key pair.
- clientId:
- type: string
- description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- clientSecret:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- connectTimeoutSeconds:
- type: integer
- description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
- disableTlsHostnameVerification:
- type: boolean
- description: Enable or disable TLS hostname verification. Default value is `false`.
- enableMetrics:
- type: boolean
- description: Enable or disable OAuth metrics. Default value is `false`.
- httpRetries:
- type: integer
- description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
- httpRetryPauseMs:
- type: integer
- description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
- includeAcceptHeader:
- type: boolean
- description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
- maxTokenExpirySeconds:
- type: integer
- description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
- passwordSecret:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the password.
- password:
- type: string
- description: The name of the key in the Secret under which the password is stored.
- required:
- - secretName
- - password
- description: Reference to the `Secret` which holds the password.
- readTimeoutSeconds:
- type: integer
- description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
- refreshToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
- scope:
- type: string
- description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
- tlsTrustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection to the OAuth server.
- tokenEndpointUri:
- type: string
- description: Authorization server token endpoint URI.
- type:
- type: string
- enum:
- - tls
- - scram-sha-256
- - scram-sha-512
- - plain
- - oauth
- description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
- username:
- type: string
- description: Username used for the authentication.
- required:
- - type
- description: Authentication configuration for connecting to the cluster.
- http:
- type: object
- properties:
- port:
- type: integer
- minimum: 1023
- description: The port on which the server is listening.
- cors:
- type: object
- properties:
- allowedOrigins:
- type: array
- items:
- type: string
- description: List of allowed origins. Java regular expressions can be used.
- allowedMethods:
- type: array
- items:
- type: string
- description: List of allowed HTTP methods.
- required:
- - allowedOrigins
- - allowedMethods
- description: CORS configuration for the HTTP Bridge.
- description: The HTTP related configuration.
- adminClient:
- type: object
- properties:
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: The Kafka AdminClient configuration used for AdminClient instances created by the bridge.
- description: Kafka AdminClient related configuration.
- consumer:
- type: object
- properties:
- enabled:
- type: boolean
- description: "Whether the HTTP consumer should be enabled or disabled, default is enabled."
- timeoutSeconds:
- type: integer
- description: "The timeout in seconds for deleting inactive consumers, default is -1 (disabled)."
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Kafka consumer configuration used for consumer instances created by the bridge. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, group.id, sasl., security. (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
- description: Kafka consumer related configuration.
- producer:
- type: object
- properties:
- enabled:
- type: boolean
- description: "Whether the HTTP producer should be enabled or disabled, default is enabled."
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Kafka producer configuration used for producer instances created by the bridge. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, sasl., security. (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
- description: Kafka producer related configuration.
- resources:
- type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve.
- jvmOptions:
- type: object
- properties:
- "-XX":
- additionalProperties:
- type: string
- type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to the JVM.
- "-Xms":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to the JVM.
- gcLoggingEnabled:
- type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The system property name.
- value:
- type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: '**Currently not supported** JVM Options for pods.'
- logging:
- type: object
- properties:
- loggers:
- additionalProperties:
- type: string
- type: object
- description: A Map from logger name to logger level.
- type:
- type: string
- enum:
- - inline
- - external
- description: "Logging type, must be either 'inline' or 'external'."
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: '`ConfigMap` entry where the logging configuration is stored. '
- required:
- - type
- description: Logging configuration for Kafka Bridge.
- clientRackInitImage:
- type: string
- description: The image of the init container used for initializing the `client.rack`.
- rack:
- type: object
- properties:
- topologyKey:
- type: string
- example: topology.kubernetes.io/zone
- description: "A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set a broker's `broker.rack` config, and the `client.rack` config for Kafka Connect or MirrorMaker 2."
- required:
- - topologyKey
- description: Configuration of the node label which will be used as the client.rack consumer configuration.
- enableMetrics:
- type: boolean
- description: Enable the metrics for the Kafka Bridge. Default is false.
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- template:
- type: object
- properties:
- deployment:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- deploymentStrategy:
- type: string
- enum:
- - RollingUpdate
- - Recreate
- description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
- description: Template for Kafka Bridge `Deployment`.
- pod:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- imagePullSecrets:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
- securityContext:
- type: object
- properties:
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- fsGroup:
- type: integer
- fsGroupChangePolicy:
- type: string
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- supplementalGroups:
- type: array
- items:
- type: integer
- sysctls:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- value:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Configures pod-level security attributes and common container settings.
- terminationGracePeriodSeconds:
- type: integer
- minimum: 0
- description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
- affinity:
- type: object
- properties:
- nodeAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- preference:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: object
- properties:
- nodeSelectorTerms:
- type: array
- items:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- podAntiAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- description: The pod's affinity rules.
- tolerations:
- type: array
- items:
- type: object
- properties:
- effect:
- type: string
- key:
- type: string
- operator:
- type: string
- tolerationSeconds:
- type: integer
- value:
- type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- maxSkew:
- type: integer
- minDomains:
- type: integer
- nodeAffinityPolicy:
- type: string
- nodeTaintsPolicy:
- type: string
- topologyKey:
- type: string
- whenUnsatisfiable:
- type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
- type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
- type: object
- properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Kafka Bridge `Pods`.
- apiService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- ipFamilyPolicy:
- type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
- type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for Kafka Bridge API `Service`.
- podDisruptionBudget:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
- maxUnavailable:
- type: integer
- minimum: 0
- description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
- description: Template for Kafka Bridge `PodDisruptionBudget`.
- bridgeContainer:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Kafka Bridge container.
- clusterRoleBinding:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Kafka Bridge ClusterRoleBinding.
- serviceAccount:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Kafka Bridge service account.
- initContainer:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Kafka Bridge init container.
- description: Template for Kafka Bridge resources. The template allows users to specify how a `Deployment` and `Pod` is generated.
- tracing:
- type: object
- properties:
- type:
- type: string
- enum:
- - jaeger
- - opentelemetry
- description: "Type of the tracing used. Currently the only supported type is `opentelemetry` for OpenTelemetry tracing. As of Strimzi 0.37.0, `jaeger` type is not supported anymore and this option is ignored."
- required:
- - type
- description: The configuration of tracing in Kafka Bridge.
- required:
- - bootstrapServers
- description: The specification of the Kafka Bridge.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- url:
- type: string
- description: The URL at which external client applications can access the Kafka Bridge.
- replicas:
- type: integer
- description: The current number of pods being used to provide this resource.
- labelSelector:
- type: string
- description: Label selector for pods providing this resource.
- description: The status of the Kafka Bridge.
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: strimzi-cluster-operator
- labels:
- app: strimzi
-subjects:
- - kind: ServiceAccount
- name: strimzi-cluster-operator
- namespace: myproject
-roleRef:
- kind: ClusterRole
- name: strimzi-cluster-operator-namespaced
- apiGroup: rbac.authorization.k8s.io
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: strimzi-cluster-operator-kafka-broker-delegation
- labels:
- app: strimzi
-# The Kafka broker cluster role must be bound to the cluster operator service account so that it can delegate the cluster role to the Kafka brokers.
-# This must be done to avoid escalating privileges which would be blocked by Kubernetes.
-subjects:
- - kind: ServiceAccount
- name: strimzi-cluster-operator
- namespace: myproject
-roleRef:
- kind: ClusterRole
- name: strimzi-kafka-broker
- apiGroup: rbac.authorization.k8s.io
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: strimzi-cluster-operator-leader-election
- labels:
- app: strimzi
-rules:
- - apiGroups:
- - coordination.k8s.io
- resources:
- # The cluster operator needs to access and manage leases for leader election
- # The "create" verb cannot be used with "resourceNames"
- - leases
- verbs:
- - create
- - apiGroups:
- - coordination.k8s.io
- resources:
- # The cluster operator needs to access and manage leases for leader election
- - leases
- resourceNames:
- # The default RBAC files give the operator only access to the Lease resource names strimzi-cluster-operator
- # If you want to use another resource name or resource namespace, you have to configure the RBAC resources accordingly
- - strimzi-cluster-operator
- verbs:
- - get
- - list
- - watch
- - delete
- - patch
- - update
-
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: strimzi-cluster-operator
- labels:
- app: strimzi
-spec:
- replicas: 1
- selector:
- matchLabels:
- name: strimzi-cluster-operator
- strimzi.io/kind: cluster-operator
- template:
- metadata:
- labels:
- name: strimzi-cluster-operator
- strimzi.io/kind: cluster-operator
- spec:
- serviceAccountName: strimzi-cluster-operator
- volumes:
- - name: strimzi-tmp
- emptyDir:
- medium: Memory
- sizeLimit: 1Mi
- - name: co-config-volume
- configMap:
- name: strimzi-cluster-operator
- containers:
- - name: strimzi-cluster-operator
- image: quay.io/strimzi/operator:0.42.0
- ports:
- - containerPort: 8080
- name: http
- args:
- - /opt/strimzi/bin/cluster_operator_run.sh
- volumeMounts:
- - name: strimzi-tmp
- mountPath: /tmp
- - name: co-config-volume
- mountPath: /opt/strimzi/custom-config/
- env:
- - name: STRIMZI_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: STRIMZI_FULL_RECONCILIATION_INTERVAL_MS
- value: "120000"
- - name: STRIMZI_OPERATION_TIMEOUT_MS
- value: "300000"
- - name: STRIMZI_DEFAULT_KAFKA_EXPORTER_IMAGE
- value: quay.io/strimzi/kafka:0.42.0-kafka-3.7.1
- - name: STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE
- value: quay.io/strimzi/kafka:0.42.0-kafka-3.7.1
- - name: STRIMZI_KAFKA_IMAGES
- value: |
- 3.6.0=quay.io/strimzi/kafka:0.42.0-kafka-3.6.0
- 3.6.1=quay.io/strimzi/kafka:0.42.0-kafka-3.6.1
- 3.6.2=quay.io/strimzi/kafka:0.42.0-kafka-3.6.2
- 3.7.0=quay.io/strimzi/kafka:0.42.0-kafka-3.7.0
- 3.7.1=quay.io/strimzi/kafka:0.42.0-kafka-3.7.1
- - name: STRIMZI_KAFKA_CONNECT_IMAGES
- value: |
- 3.6.0=quay.io/strimzi/kafka:0.42.0-kafka-3.6.0
- 3.6.1=quay.io/strimzi/kafka:0.42.0-kafka-3.6.1
- 3.6.2=quay.io/strimzi/kafka:0.42.0-kafka-3.6.2
- 3.7.0=quay.io/strimzi/kafka:0.42.0-kafka-3.7.0
- 3.7.1=quay.io/strimzi/kafka:0.42.0-kafka-3.7.1
- - name: STRIMZI_KAFKA_MIRROR_MAKER_IMAGES
- value: |
- 3.6.0=quay.io/strimzi/kafka:0.42.0-kafka-3.6.0
- 3.6.1=quay.io/strimzi/kafka:0.42.0-kafka-3.6.1
- 3.6.2=quay.io/strimzi/kafka:0.42.0-kafka-3.6.2
- 3.7.0=quay.io/strimzi/kafka:0.42.0-kafka-3.7.0
- 3.7.1=quay.io/strimzi/kafka:0.42.0-kafka-3.7.1
- - name: STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES
- value: |
- 3.6.0=quay.io/strimzi/kafka:0.42.0-kafka-3.6.0
- 3.6.1=quay.io/strimzi/kafka:0.42.0-kafka-3.6.1
- 3.6.2=quay.io/strimzi/kafka:0.42.0-kafka-3.6.2
- 3.7.0=quay.io/strimzi/kafka:0.42.0-kafka-3.7.0
- 3.7.1=quay.io/strimzi/kafka:0.42.0-kafka-3.7.1
- - name: STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE
- value: quay.io/strimzi/operator:0.42.0
- - name: STRIMZI_DEFAULT_USER_OPERATOR_IMAGE
- value: quay.io/strimzi/operator:0.42.0
- - name: STRIMZI_DEFAULT_KAFKA_INIT_IMAGE
- value: quay.io/strimzi/operator:0.42.0
- - name: STRIMZI_DEFAULT_KAFKA_BRIDGE_IMAGE
- value: quay.io/strimzi/kafka-bridge:0.29.0
- - name: STRIMZI_DEFAULT_KANIKO_EXECUTOR_IMAGE
- value: quay.io/strimzi/kaniko-executor:0.42.0
- - name: STRIMZI_DEFAULT_MAVEN_BUILDER
- value: quay.io/strimzi/maven-builder:0.42.0
- - name: STRIMZI_OPERATOR_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: STRIMZI_FEATURE_GATES
- value: ""
- - name: STRIMZI_LEADER_ELECTION_ENABLED
- value: "true"
- - name: STRIMZI_LEADER_ELECTION_LEASE_NAME
- value: "strimzi-cluster-operator"
- - name: STRIMZI_LEADER_ELECTION_LEASE_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: STRIMZI_LEADER_ELECTION_IDENTITY
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- livenessProbe:
- httpGet:
- path: /healthy
- port: http
- initialDelaySeconds: 10
- periodSeconds: 30
- readinessProbe:
- httpGet:
- path: /ready
- port: http
- initialDelaySeconds: 10
- periodSeconds: 30
- resources:
- limits:
- cpu: 1000m
- memory: 384Mi
- requests:
- cpu: 200m
- memory: 384Mi
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: strimzipodsets.core.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: core.strimzi.io
- names:
- kind: StrimziPodSet
- listKind: StrimziPodSetList
- singular: strimzipodset
- plural: strimzipodsets
- shortNames:
- - sps
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Pods
- description: Number of pods managed by the StrimziPodSet
- jsonPath: .status.pods
- type: integer
- - name: Ready Pods
- description: Number of ready pods managed by the StrimziPodSet
- jsonPath: .status.readyPods
- type: integer
- - name: Current Pods
- description: Number of up-to-date pods managed by the StrimziPodSet
- jsonPath: .status.currentPods
- type: integer
- - name: Age
- description: Age of the StrimziPodSet
- jsonPath: .metadata.creationTimestamp
- type: date
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- selector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
+ enum:
+ - shared
+ description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
+ overrides:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation for this broker.
+ broker:
+ type: integer
+ description: Id of the kafka broker (broker identifier).
+ description: Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers.
+ selector:
+ additionalProperties:
+ type: string
+ type: object
+ description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
+ size:
+ type: string
+ description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
+ sizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ description: "Storage type, must be either 'ephemeral' or 'persistent-claim'."
+ required:
+ - type
+ description: List of volumes as Storage objects representing the JBOD disks array.
+ required:
+ - type
+ description: Storage configuration (disk). Cannot be updated. This property is required when node pools are not used.
+ authorization:
+ type: object
+ properties:
+ allowOnError:
+ type: boolean
+                              description: "Defines whether a Kafka client should be allowed or denied by default when the authorizer fails to query the Open Policy Agent (for example, when it is temporarily unavailable). Defaults to `false` - all actions will be denied."
+ authorizerClass:
+ type: string
+ description: "Authorization implementation class, which must be available in classpath."
+ clientId:
+ type: string
+ description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ connectTimeoutSeconds:
+ type: integer
+ minimum: 1
+                              description: "The connect timeout in seconds when connecting to the authorization server. If not set, the effective connect timeout is 60 seconds."
+ delegateToKafkaAcls:
+ type: boolean
+ description: Whether authorization decision should be delegated to the 'Simple' authorizer if DENIED by Keycloak Authorization Services policies. Default value is `false`.
+ disableTlsHostnameVerification:
+ type: boolean
+ description: Enable or disable TLS hostname verification. Default value is `false`.
+ enableMetrics:
+ type: boolean
+ description: Enable or disable OAuth metrics. The default value is `false`.
+ expireAfterMs:
+ type: integer
+ description: The expiration of the records kept in the local cache to avoid querying the Open Policy Agent for every request. Defines how often the cached authorization decisions are reloaded from the Open Policy Agent server. In milliseconds. Defaults to `3600000`.
+ grantsAlwaysLatest:
+ type: boolean
+ description: "Controls whether the latest grants are fetched for a new session. When enabled, grants are retrieved from Keycloak and cached for the user. The default value is `false`."
+ grantsGcPeriodSeconds:
+ type: integer
+ minimum: 1
+ description: "The time, in seconds, between consecutive runs of a job that cleans stale grants from the cache. The default value is 300."
+ grantsMaxIdleTimeSeconds:
+ type: integer
+ minimum: 1
+ description: "The time, in seconds, after which an idle grant can be evicted from the cache. The default value is 300."
+ grantsRefreshPeriodSeconds:
+ type: integer
+ minimum: 0
+ description: The time between two consecutive grants refresh runs in seconds. The default value is 60.
+ grantsRefreshPoolSize:
+ type: integer
+ minimum: 1
+ description: "The number of threads to use to refresh grants for active sessions. The more threads, the more parallelism, so the sooner the job completes. However, using more threads places a heavier load on the authorization server. The default value is 5."
+ httpRetries:
+ type: integer
+ minimum: 0
+ description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
+ includeAcceptHeader:
+ type: boolean
+ description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
+ initialCacheCapacity:
+ type: integer
+                              description: Initial capacity of the local cache used by the authorizer to avoid querying the Open Policy Agent for every request. Defaults to `5000`.
+ maximumCacheSize:
+ type: integer
+ description: Maximum capacity of the local cache used by the authorizer to avoid querying the Open Policy Agent for every request. Defaults to `50000`.
+ readTimeoutSeconds:
+ type: integer
+ minimum: 1
+                              description: "The read timeout in seconds when connecting to the authorization server. If not set, the effective read timeout is 60 seconds."
+ superUsers:
+ type: array
+ items:
type: string
- operator:
+ description: "List of super users, which are user principals with unlimited access rights."
+ supportsAdminApi:
+ type: boolean
+ description: Indicates whether the custom authorizer supports the APIs for managing ACLs using the Kafka Admin API. Defaults to `false`.
+ tlsTrustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection to the OAuth server.
+ tokenEndpointUri:
+ type: string
+ description: Authorization server token endpoint URI.
+ type:
+ type: string
+ enum:
+ - simple
+ - opa
+ - keycloak
+ - custom
+                              description: "Authorization type. Currently, the supported types are `simple`, `keycloak`, `opa` and `custom`. `simple` authorization type uses Kafka's built-in authorizer for authorization. `keycloak` authorization type uses Keycloak Authorization Services for authorization. `opa` authorization type uses Open Policy Agent based authorization. `custom` authorization type uses user-provided implementation for authorization."
+ url:
+ type: string
+ example: http://opa:8181/v1/data/kafka/authz/allow
+ description: The URL used to connect to the Open Policy Agent server. The URL has to include the policy which will be queried by the authorizer. This option is required.
+ required:
+ - type
+ description: Authorization configuration for Kafka brokers.
+ rack:
+ type: object
+ properties:
+ topologyKey:
+ type: string
+ example: topology.kubernetes.io/zone
+ description: "A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set a broker's `broker.rack` config, and the `client.rack` config for Kafka Connect or MirrorMaker 2."
+ required:
+ - topologyKey
+ description: Configuration of the `broker.rack` broker config.
+ brokerRackInitImage:
+ type: string
+ description: The image of the init container used for initializing the `broker.rack`.
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+                              description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+                              description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+                              description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+                              description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+                              description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+                              description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ jvmOptions:
+ type: object
+ properties:
+ "-XX":
+ additionalProperties:
+ type: string
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+                              description: -Xmx option to the JVM.
+ "-Xms":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+                              description: -Xms option to the JVM.
+ gcLoggingEnabled:
+ type: boolean
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The system property name.
+ value:
+ type: string
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: JVM Options for pods.
+ jmxOptions:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - password
+                                  description: Authentication type. Currently the only supported type is `password`. The `password` type creates a username and protected port with no TLS.
+ required:
+ - type
+ description: Authentication configuration for connecting to the JMX port.
+ description: JMX Options for Kafka brokers.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ metricsConfig:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - jmxPrometheusExporter
+                              description: Metrics type. Only 'jmxPrometheusExporter' is currently supported.
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
+ required:
+ - type
+ - valueFrom
+ description: Metrics configuration.
+ logging:
+ type: object
+ properties:
+ loggers:
+ additionalProperties:
type: string
- values:
- type: array
- items:
+ type: object
+ description: A Map from logger name to logger level.
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ description: "Logging type, must be either 'inline' or 'external'."
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: '`ConfigMap` entry where the logging configuration is stored. '
+ required:
+ - type
+ description: Logging configuration for Kafka.
+ template:
+ type: object
+ properties:
+ statefulset:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ podManagementPolicy:
+ type: string
+ enum:
+ - OrderedReady
+ - Parallel
+ description: PodManagementPolicy which will be used for this StatefulSet. Valid values are `Parallel` and `OrderedReady`. Defaults to `Parallel`.
+ description: Template for Kafka `StatefulSet`.
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
+ securityContext:
+ type: object
+ properties:
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Configures pod-level security attributes and common container settings.
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ description: The pod's tolerations.
+ topologySpreadConstraints:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- description: "Selector is a label query which matches all the pods managed by this `StrimziPodSet`. Only `matchLabels` is supported. If `matchExpressions` is set, it will be ignored."
- pods:
- type: array
- items:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: The Pods managed by this StrimziPodSet.
- required:
- - selector
- - pods
- description: The specification of the StrimziPodSet.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- pods:
- type: integer
- description: Number of pods managed by this `StrimziPodSet` resource.
- readyPods:
- type: integer
- description: Number of pods managed by this `StrimziPodSet` resource that are ready.
- currentPods:
- type: integer
- description: Number of pods managed by this `StrimziPodSet` resource that have the current revision.
- description: The status of the StrimziPodSet.
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: strimzi-cluster-operator-kafka-client-delegation
- labels:
- app: strimzi
-# The Kafka clients cluster role must be bound to the cluster operator service account so that it can delegate the
-# cluster role to the Kafka clients using it for consuming from closest replica.
-# This must be done to avoid escalating privileges which would be blocked by Kubernetes.
-subjects:
- - kind: ServiceAccount
- name: strimzi-cluster-operator
- namespace: myproject
-roleRef:
- kind: ClusterRole
- name: strimzi-kafka-client
- apiGroup: rbac.authorization.k8s.io
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: kafkausers.kafka.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: kafka.strimzi.io
- names:
- kind: KafkaUser
- listKind: KafkaUserList
- singular: kafkauser
- plural: kafkausers
- shortNames:
- - ku
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka cluster this user belongs to
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: Authentication
- description: How the user is authenticated
- jsonPath: .spec.authentication.type
- type: string
- - name: Authorization
- description: How the user is authorised
- jsonPath: .spec.authorization.type
- type: string
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- authentication:
- type: object
- properties:
- password:
- type: object
- properties:
- valueFrom:
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+                                    description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Kafka `Pods`.
+ bootstrapService:
type: object
properties:
- secretKeyRef:
+ metadata:
type: object
properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Selects a key of a Secret in the resource's namespace.
- description: Secret from which the password should be read.
- required:
- - valueFrom
- description: "Specify the password for the user. If not set, a new password is generated by the User Operator."
- type:
- type: string
- enum:
- - tls
- - tls-external
- - scram-sha-512
- description: Authentication type.
- required:
- - type
- description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication."
- authorization:
- type: object
- properties:
- acls:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- enum:
- - allow
- - deny
- description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow user to execute the specified operations. Default value is `allow`.
- resource:
- type: object
- properties:
- name:
- type: string
- description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern.
- patternType:
- type: string
- enum:
- - literal
- - prefix
- description: "Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`."
- type:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
type: string
enum:
- - topic
- - group
- - cluster
- - transactionalId
- description: "Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`."
- required:
- - type
- description: Indicates the resource for which given ACL rule applies.
- host:
- type: string
- description: "The host from which the action described in the ACL rule is allowed or denied. If not set, it defaults to `*`, allowing or denying the action from any host."
- operation:
- type: string
- enum:
- - Read
- - Write
- - Create
- - Delete
- - Alter
- - Describe
- - ClusterAction
- - AlterConfigs
- - DescribeConfigs
- - IdempotentWrite
- - All
- description: "Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
- operations:
- type: array
- items:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for Kafka bootstrap `Service`.
+ brokersService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
type: string
enum:
- - Read
- - Write
- - Create
- - Delete
- - Alter
- - Describe
- - ClusterAction
- - AlterConfigs
- - DescribeConfigs
- - IdempotentWrite
- - All
- description: "List of operations which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
- required:
- - resource
- description: List of ACL rules which should be applied to this user.
- type:
- type: string
- enum:
- - simple
- description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses the Kafka Admin API for managing the ACL rules.
- required:
- - acls
- - type
- description: Authorization rules for this Kafka user.
- quotas:
- type: object
- properties:
- producerByteRate:
- type: integer
- minimum: 0
- description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis.
- consumerByteRate:
- type: integer
- minimum: 0
- description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis.
- requestPercentage:
- type: integer
- minimum: 0
- description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads.
- controllerMutationRate:
- type: number
- minimum: 0
- description: "A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted."
- description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced.Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas.
- template:
- type: object
- properties:
- secret:
- type: object
- properties:
- metadata:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
+ type: string
+ enum:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for Kafka broker `Service`.
+ externalBootstrapService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka external bootstrap `Service`.
+ perPodService:
type: object
properties:
- labels:
- additionalProperties:
- type: string
+ metadata:
type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka per-pod `Services` used for access from outside of Kubernetes.
+ externalBootstrapRoute:
+ type: object
+ properties:
+ metadata:
type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated.
- description: Template to specify how Kafka User `Secrets` are generated.
- description: The specification of the user.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- username:
- type: string
- description: Username.
- secret:
- type: string
- description: The name of `Secret` where the credentials are stored.
- description: The status of the Kafka User.
- - name: v1beta1
- served: true
- storage: false
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka cluster this user belongs to
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: Authentication
- description: How the user is authenticated
- jsonPath: .spec.authentication.type
- type: string
- - name: Authorization
- description: How the user is authorised
- jsonPath: .spec.authorization.type
- type: string
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- authentication:
- type: object
- properties:
- password:
- type: object
- properties:
- valueFrom:
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka external bootstrap `Route`.
+ perPodRoute:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka per-pod `Routes` used for access from outside of OpenShift.
+ externalBootstrapIngress:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka external bootstrap `Ingress`.
+ perPodIngress:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for all Kafka `PersistentVolumeClaims`.
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
+ description: Template for Kafka `PodDisruptionBudget`.
+ kafkaContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka broker container.
+ initContainer:
type: object
properties:
- secretKeyRef:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
type: object
properties:
- key:
- type: string
- name:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
type: string
- optional:
+ readOnlyRootFilesystem:
type: boolean
- description: Selects a key of a Secret in the resource's namespace.
- description: Secret from which the password should be read.
- required:
- - valueFrom
- description: "Specify the password for the user. If not set, a new password is generated by the User Operator."
- type:
- type: string
- enum:
- - tls
- - tls-external
- - scram-sha-512
- description: Authentication type.
- required:
- - type
- description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication."
- authorization:
- type: object
- properties:
- acls:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- enum:
- - allow
- - deny
- description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow user to execute the specified operations. Default value is `allow`.
- resource:
- type: object
- properties:
- name:
- type: string
- description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern.
- patternType:
- type: string
- enum:
- - literal
- - prefix
- description: "Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`."
- type:
- type: string
- enum:
- - topic
- - group
- - cluster
- - transactionalId
- description: "Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`."
- required:
- - type
- description: Indicates the resource for which given ACL rule applies.
- host:
- type: string
- description: "The host from which the action described in the ACL rule is allowed or denied. If not set, it defaults to `*`, allowing or denying the action from any host."
- operation:
- type: string
- enum:
- - Read
- - Write
- - Create
- - Delete
- - Alter
- - Describe
- - ClusterAction
- - AlterConfigs
- - DescribeConfigs
- - IdempotentWrite
- - All
- description: "Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
- operations:
- type: array
- items:
- type: string
- enum:
- - Read
- - Write
- - Create
- - Delete
- - Alter
- - Describe
- - ClusterAction
- - AlterConfigs
- - DescribeConfigs
- - IdempotentWrite
- - All
- description: "List of operations which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
- required:
- - resource
- description: List of ACL rules which should be applied to this user.
- type:
- type: string
- enum:
- - simple
- description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses the Kafka Admin API for managing the ACL rules.
- required:
- - acls
- - type
- description: Authorization rules for this Kafka user.
- quotas:
- type: object
- properties:
- producerByteRate:
- type: integer
- minimum: 0
- description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis.
- consumerByteRate:
- type: integer
- minimum: 0
- description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis.
- requestPercentage:
- type: integer
- minimum: 0
- description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads.
- controllerMutationRate:
- type: number
- minimum: 0
- description: "A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted."
- description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced.Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas.
- template:
- type: object
- properties:
- secret:
- type: object
- properties:
- metadata:
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka init container.
+ clusterCaCert:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
+ metadata:
type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated.
- description: Template to specify how Kafka User `Secrets` are generated.
- description: The specification of the user.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- username:
- type: string
- description: Username.
- secret:
- type: string
- description: The name of `Secret` where the credentials are stored.
- description: The status of the Kafka User.
- - name: v1alpha1
- served: true
- storage: false
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka cluster this user belongs to
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: Authentication
- description: How the user is authenticated
- jsonPath: .spec.authentication.type
- type: string
- - name: Authorization
- description: How the user is authorised
- jsonPath: .spec.authorization.type
- type: string
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- authentication:
- type: object
- properties:
- password:
- type: object
- properties:
- valueFrom:
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Secret with Kafka Cluster certificate public key.
+ serviceAccount:
type: object
properties:
- secretKeyRef:
+ metadata:
type: object
properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Selects a key of a Secret in the resource's namespace.
- description: Secret from which the password should be read.
- required:
- - valueFrom
- description: "Specify the password for the user. If not set, a new password is generated by the User Operator."
- type:
- type: string
- enum:
- - tls
- - tls-external
- - scram-sha-512
- description: Authentication type.
- required:
- - type
- description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication."
- authorization:
- type: object
- properties:
- acls:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- enum:
- - allow
- - deny
- description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow user to execute the specified operations. Default value is `allow`.
- resource:
- type: object
- properties:
- name:
- type: string
- description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern.
- patternType:
- type: string
- enum:
- - literal
- - prefix
- description: "Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`."
- type:
- type: string
- enum:
- - topic
- - group
- - cluster
- - transactionalId
- description: "Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`."
- required:
- - type
- description: Indicates the resource for which given ACL rule applies.
- host:
- type: string
- description: "The host from which the action described in the ACL rule is allowed or denied. If not set, it defaults to `*`, allowing or denying the action from any host."
- operation:
- type: string
- enum:
- - Read
- - Write
- - Create
- - Delete
- - Alter
- - Describe
- - ClusterAction
- - AlterConfigs
- - DescribeConfigs
- - IdempotentWrite
- - All
- description: "Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
- operations:
- type: array
- items:
- type: string
- enum:
- - Read
- - Write
- - Create
- - Delete
- - Alter
- - Describe
- - ClusterAction
- - AlterConfigs
- - DescribeConfigs
- - IdempotentWrite
- - All
- description: "List of operations which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
- required:
- - resource
- description: List of ACL rules which should be applied to this user.
- type:
- type: string
- enum:
- - simple
- description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses the Kafka Admin API for managing the ACL rules.
- required:
- - acls
- - type
- description: Authorization rules for this Kafka user.
- quotas:
- type: object
- properties:
- producerByteRate:
- type: integer
- minimum: 0
- description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis.
- consumerByteRate:
- type: integer
- minimum: 0
- description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis.
- requestPercentage:
- type: integer
- minimum: 0
- description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads.
- controllerMutationRate:
- type: number
- minimum: 0
- description: "A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted."
- description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced.Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas.
- template:
- type: object
- properties:
- secret:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka service account.
+ jmxSecret:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Secret of the Kafka Cluster JMX authentication.
+ clusterRoleBinding:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka ClusterRoleBinding.
+ podSet:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka `StrimziPodSet` resource.
+ description: Template for Kafka cluster resources. The template allows users to specify how the Kubernetes resources are generated.
+ tieredStorage:
type: object
properties:
- metadata:
+ remoteStorageManager:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
+ className:
+ type: string
+ description: The class name for the `RemoteStorageManager` implementation.
+ classPath:
+ type: string
+ description: The class path for the `RemoteStorageManager` implementation.
+ config:
additionalProperties:
type: string
type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated.
- description: Template to specify how Kafka User `Secrets` are generated.
- description: The specification of the user.
- status:
- type: object
- properties:
- conditions:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
- type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
- type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
- type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- username:
- type: string
- description: Username.
- secret:
- type: string
- description: The name of `Secret` where the credentials are stored.
- description: The status of the Kafka User.
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: kafkas.kafka.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: kafka.strimzi.io
- names:
- kind: Kafka
- listKind: KafkaList
- singular: kafka
- plural: kafkas
- shortNames:
- - k
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- additionalPrinterColumns:
- - name: Desired Kafka replicas
- description: The desired number of Kafka replicas in the cluster
- jsonPath: .spec.kafka.replicas
- type: integer
- - name: Desired ZK replicas
- description: The desired number of ZooKeeper replicas in the cluster
- jsonPath: .spec.zookeeper.replicas
- type: integer
- - name: Ready
- description: The state of the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- - name: Metadata State
- description: The state of the cluster metadata
- jsonPath: .status.kafkaMetadataState
- type: string
- - name: Warnings
- description: Warnings related to the custom resource
- jsonPath: ".status.conditions[?(@.type==\"Warning\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- kafka:
+ description: "The additional configuration map for the `RemoteStorageManager` implementation. Keys will be automatically prefixed with `rsm.config.`, and added to Kafka broker configuration."
+ description: Configuration for the Remote Storage Manager.
+ type:
+ type: string
+ enum:
+ - custom
+ description: "Storage type, only 'custom' is supported at the moment."
+ required:
+ - type
+ description: Configure the tiered storage feature for Kafka brokers.
+ quotas:
+ type: object
+ properties:
+ consumerByteRate:
+ type: integer
+ minimum: 0
+ description: "A per-broker byte-rate quota for clients consuming from a broker, independent of their number. If clients consume at maximum speed, the quota is shared equally between all non-excluded consumers. Otherwise, the quota is divided based on each client's consumption rate."
+ controllerMutationRate:
+ type: number
+ minimum: 0
+ description: "The default client quota on the rate at which mutations are accepted per second for create topic requests, create partition requests, and delete topic requests, defined for each broker. The mutations rate is measured by the number of partitions created or deleted. Applied on a per-broker basis."
+ excludedPrincipals:
+ type: array
+ items:
+ type: string
+ description: "List of principals that are excluded from the quota. The principals have to be prefixed with `User:`, for example `User:my-user;User:CN=my-other-user`."
+ minAvailableBytesPerVolume:
+ type: integer
+ minimum: 0
+ description: Stop message production if the available size (in bytes) of the storage is lower than or equal to this specified value. This condition is mutually exclusive with `minAvailableRatioPerVolume`.
+ minAvailableRatioPerVolume:
+ type: number
+ minimum: 0
+ maximum: 1
+ description: Stop message production if the percentage of available storage space falls below or equals the specified ratio (set as a decimal representing a percentage). This condition is mutually exclusive with `minAvailableBytesPerVolume`.
+ producerByteRate:
+ type: integer
+ minimum: 0
+ description: "A per-broker byte-rate quota for clients producing to a broker, independent of their number. If clients produce at maximum speed, the quota is shared equally between all non-excluded producers. Otherwise, the quota is divided based on each client's production rate."
+ requestPercentage:
+ type: integer
+ minimum: 0
+ description: The default client quota limits the maximum CPU utilization of each client as a percentage of the network and I/O threads of each broker. Applied on a per-broker basis.
+ type:
+ type: string
+ enum:
+ - kafka
+ - strimzi
+                    description: "Quotas plugin type. Currently, the supported types are `kafka` and `strimzi`. `kafka` quotas type uses Kafka's built-in quotas plugin. `strimzi` quotas type uses the Strimzi quotas plugin."
+ required:
+ - type
+ description: "Quotas plugin configuration for Kafka brokers allows setting quotas for disk usage, produce/fetch rates, and more. Supported plugin types include `kafka` (default) and `strimzi`. If not specified, the default `kafka` quotas plugin is used."
+ required:
+ - listeners
+ description: Configuration of the Kafka cluster.
+ zookeeper:
type: object
properties:
- version:
- type: string
- description: The Kafka broker version. Defaults to the latest version. Consult the user documentation to understand the process required to upgrade or downgrade the version.
- metadataVersion:
- type: string
- description: "The KRaft metadata version used by the Kafka cluster. This property is ignored when running in ZooKeeper mode. If the property is not set, it defaults to the metadata version that corresponds to the `version` property."
replicas:
type: integer
minimum: 1
- description: The number of pods in the cluster. This property is required when node pools are not used.
+ description: The number of pods in the cluster.
image:
type: string
- description: "The container image used for Kafka pods. If the property is not set, the default Kafka image version is determined based on the `version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration. Changing the Kafka image version does not automatically update the image versions for other components, such as Kafka Exporter. "
- listeners:
- type: array
- minItems: 1
- items:
- type: object
- properties:
- name:
+ description: "The container image used for ZooKeeper pods. If no image name is explicitly specified, it is determined based on the Kafka version set in `spec.kafka.version`. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
+ storage:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation.
+ deleteClaim:
+ type: boolean
+ description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
+ id:
+ type: integer
+ minimum: 0
+ description: Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration.
+ kraftMetadata:
+ type: string
+ enum:
+ - shared
+ description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
+ overrides:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation for this broker.
+ broker:
+ type: integer
+                          description: ID of the Kafka broker (broker identifier).
+ description: Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers.
+ selector:
+ additionalProperties:
type: string
- pattern: "^[a-z0-9]{1,11}$"
- description: Name of the listener. The name will be used to identify the listener and the related Kubernetes objects. The name has to be unique within given a Kafka cluster. The name can consist of lowercase characters and numbers and be up to 11 characters long.
- port:
- type: integer
- minimum: 9092
- description: "Port number used by the listener inside Kafka. The port number has to be unique within a given Kafka cluster. Allowed port numbers are 9092 and higher with the exception of ports 9404 and 9999, which are already used for Prometheus and JMX. Depending on the listener type, the port number might not be the same as the port number that connects Kafka clients."
- type:
+ type: object
+ description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
+ size:
+ type: string
+ description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
+ sizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ description: "Storage type, must be either 'ephemeral' or 'persistent-claim'."
+ required:
+ - type
+ description: Storage configuration (disk). Cannot be updated.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The ZooKeeper broker config. Properties with the following prefixes cannot be set: server., dataDir, dataLogDir, clientPort, authProvider, quorum.auth, requireClientAuthScheme, snapshot.trust.empty, standaloneEnabled, reconfigEnabled, 4lw.commands.whitelist, secureClientPort, ssl., serverCnxnFactory, sslQuorum (with the exception of: ssl.protocol, ssl.quorum.protocol, ssl.enabledProtocols, ssl.quorum.enabledProtocols, ssl.ciphersuites, ssl.quorum.ciphersuites, ssl.hostnameVerification, ssl.quorum.hostnameVerification)."
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+                    description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+                    description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+                    description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+                    description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+                    description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+                    description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ jvmOptions:
+ type: object
+ properties:
+ "-XX":
+ additionalProperties:
type: string
- enum:
- - internal
- - route
- - loadbalancer
- - nodeport
- - ingress
- - cluster-ip
- description: "Type of the listener. The supported types are as follows: \n\n* `internal` type exposes Kafka internally only within the Kubernetes cluster.\n* `route` type uses OpenShift Routes to expose Kafka.\n* `loadbalancer` type uses LoadBalancer type services to expose Kafka.\n* `nodeport` type uses NodePort type services to expose Kafka.\n* `ingress` type uses Kubernetes Nginx Ingress to expose Kafka with TLS passthrough.\n* `cluster-ip` type uses a per-broker `ClusterIP` service.\n"
- tls:
- type: boolean
- description: Enables TLS encryption on the listener. This is a required property.
- authentication:
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+                    description: -Xmx option to the JVM.
+ "-Xms":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+                    description: -Xms option to the JVM.
+ gcLoggingEnabled:
+ type: boolean
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
+ type: array
+ items:
type: object
properties:
- accessTokenIsJwt:
- type: boolean
- description: Configure whether the access token is treated as JWT. This must be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
- checkAccessTokenType:
- type: boolean
- description: Configure whether the access token type check is performed or not. This should be set to `false` if the authorization server does not include 'typ' claim in JWT token. Defaults to `true`.
- checkAudience:
- type: boolean
- description: "Enable or disable audience checking. Audience checks identify the recipients of tokens. If audience checking is enabled, the OAuth Client ID also has to be configured using the `clientId` property. The Kafka broker will reject tokens that do not have its `clientId` in their `aud` (audience) claim.Default value is `false`."
- checkIssuer:
- type: boolean
- description: Enable or disable issuer checking. By default issuer is checked using the value configured by `validIssuerUri`. Default value is `true`.
- clientAudience:
+ name:
type: string
- description: The audience to use when making requests to the authorization server's token endpoint. Used for inter-broker authentication and for configuring OAuth 2.0 over PLAIN using the `clientId` and `secret` method.
- clientId:
+ description: The system property name.
+ value:
type: string
- description: OAuth Client ID which the Kafka broker can use to authenticate against the authorization server and use the introspect endpoint URI.
- clientScope:
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: JVM Options for pods.
+ jmxOptions:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - password
+                        description: Authentication type. Currently the only supported type is `password`. The `password` type creates a username and protected port with no TLS.
+ required:
+ - type
+ description: Authentication configuration for connecting to the JMX port.
+                description: JMX Options for ZooKeeper nodes.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
type: string
- description: The scope to use when making requests to the authorization server's token endpoint. Used for inter-broker authentication and for configuring OAuth 2.0 over PLAIN using the `clientId` and `secret` method.
- clientSecret:
- type: object
- properties:
- key:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ metricsConfig:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - jmxPrometheusExporter
+                    description: Metrics type. Only 'jmxPrometheusExporter' is currently supported.
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
+ required:
+ - type
+ - valueFrom
+ description: Metrics configuration.
+ logging:
+ type: object
+ properties:
+ loggers:
+ additionalProperties:
+ type: string
+ type: object
+ description: A Map from logger name to logger level.
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ description: "Logging type, must be either 'inline' or 'external'."
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: '`ConfigMap` entry where the logging configuration is stored. '
+ required:
+ - type
+ description: Logging configuration for ZooKeeper.
+ template:
+ type: object
+ properties:
+ statefulset:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka broker can use to authenticate against the authorization server and use the introspect endpoint URI.
- connectTimeoutSeconds:
- type: integer
- description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
- customClaimCheck:
- type: string
- description: JsonPath filter query to be applied to the JWT token or to the response of the introspection endpoint for additional token validation. Not set by default.
- disableTlsHostnameVerification:
- type: boolean
- description: Enable or disable TLS hostname verification. Default value is `false`.
- enableECDSA:
- type: boolean
- description: Enable or disable ECDSA support by installing BouncyCastle crypto provider. ECDSA support is always enabled. The BouncyCastle libraries are no longer packaged with Strimzi. Value is ignored.
- enableMetrics:
- type: boolean
- description: Enable or disable OAuth metrics. Default value is `false`.
- enableOauthBearer:
- type: boolean
- description: Enable or disable OAuth authentication over SASL_OAUTHBEARER. Default value is `true`.
- enablePlain:
- type: boolean
- description: Enable or disable OAuth authentication over SASL_PLAIN. There is no re-authentication support when this mechanism is used. Default value is `false`.
- failFast:
- type: boolean
- description: Enable or disable termination of Kafka broker processes due to potentially recoverable runtime errors during startup. Default value is `true`.
- fallbackUserNameClaim:
- type: string
- description: The fallback username claim to be used for the user id if the claim specified by `userNameClaim` is not present. This is useful when `client_credentials` authentication only results in the client id being provided in another claim. It only takes effect if `userNameClaim` is set.
- fallbackUserNamePrefix:
- type: string
- description: "The prefix to use with the value of `fallbackUserNameClaim` to construct the user id. This only takes effect if `fallbackUserNameClaim` is true, and the value is present for the claim. Mapping usernames and client ids into the same user id space is useful in preventing name collisions."
- groupsClaim:
- type: string
- description: JsonPath query used to extract groups for the user during authentication. Extracted groups can be used by a custom authorizer. By default no groups are extracted.
- groupsClaimDelimiter:
- type: string
- description: "A delimiter used to parse groups when they are extracted as a single String value rather than a JSON array. Default value is ',' (comma)."
- httpRetries:
- type: integer
- description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
- httpRetryPauseMs:
- type: integer
- description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
- includeAcceptHeader:
- type: boolean
- description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
- introspectionEndpointUri:
- type: string
- description: URI of the token introspection endpoint which can be used to validate opaque non-JWT tokens.
- jwksEndpointUri:
- type: string
- description: "URI of the JWKS certificate endpoint, which can be used for local JWT validation."
- jwksExpirySeconds:
- type: integer
- minimum: 1
- description: Configures how often are the JWKS certificates considered valid. The expiry interval has to be at least 60 seconds longer then the refresh interval specified in `jwksRefreshSeconds`. Defaults to 360 seconds.
- jwksIgnoreKeyUse:
- type: boolean
- description: Flag to ignore the 'use' attribute of `key` declarations in a JWKS endpoint response. Default value is `false`.
- jwksMinRefreshPauseSeconds:
- type: integer
- minimum: 0
- description: "The minimum pause between two consecutive refreshes. When an unknown signing key is encountered the refresh is scheduled immediately, but will always wait for this minimum pause. Defaults to 1 second."
- jwksRefreshSeconds:
- type: integer
- minimum: 1
- description: Configures how often are the JWKS certificates refreshed. The refresh interval has to be at least 60 seconds shorter then the expiry interval specified in `jwksExpirySeconds`. Defaults to 300 seconds.
- listenerConfig:
- x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ podManagementPolicy:
+ type: string
+ enum:
+ - OrderedReady
+ - Parallel
+ description: PodManagementPolicy which will be used for this StatefulSet. Valid values are `Parallel` and `OrderedReady`. Defaults to `Parallel`.
+ description: Template for ZooKeeper `StatefulSet`.
+ podSet:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for ZooKeeper `StrimziPodSet` resource.
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ imagePullSecrets:
+ type: array
+ items:
type: object
- description: Configuration to be used for a specific listener. All values are prefixed with listener.name.__.
- maxSecondsWithoutReauthentication:
- type: integer
- description: "Maximum number of seconds the authenticated session remains valid without re-authentication. This enables Apache Kafka re-authentication feature, and causes sessions to expire when the access token expires. If the access token expires before max time or if max time is reached, the client has to re-authenticate, otherwise the server will drop the connection. Not set by default - the authenticated session does not expire when the access token expires. This option only applies to SASL_OAUTHBEARER authentication mechanism (when `enableOauthBearer` is `true`)."
- readTimeoutSeconds:
- type: integer
- description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
- sasl:
- type: boolean
- description: Enable or disable SASL on this listener.
- secrets:
- type: array
- items:
+ properties:
+ name:
+ type: string
+ description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
+ securityContext:
+ type: object
+ properties:
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Configures pod-level security attributes and common container settings.
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
type: object
properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Secrets to be mounted to /opt/kafka/custom-authn-secrets/custom-listener-_-_/__.
- tlsTrustedCertificates:
- type: array
- items:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
type: object
properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection to the OAuth server.
- tokenEndpointUri:
- type: string
- description: "URI of the Token Endpoint to use with SASL_PLAIN mechanism when the client authenticates with `clientId` and a `secret`. If set, the client can authenticate over SASL_PLAIN by either setting `username` to `clientId`, and setting `password` to client `secret`, or by setting `username` to account username, and `password` to access token prefixed with `$accessToken:`. If this option is not set, the `password` is always interpreted as an access token (without a prefix), and `username` as the account username (a so called 'no-client-credentials' mode)."
- type:
- type: string
- enum:
- - tls
- - scram-sha-512
- - oauth
- - custom
- description: Authentication type. `oauth` type uses SASL OAUTHBEARER Authentication. `scram-sha-512` type uses SASL SCRAM-SHA-512 Authentication. `tls` type uses TLS Client Authentication. `tls` type is supported only on TLS listeners.`custom` type allows for any authentication type to be used.
- userInfoEndpointUri:
- type: string
- description: 'URI of the User Info Endpoint to use as a fallback to obtaining the user id when the Introspection Endpoint does not return information that can be used for the user id. '
- userNameClaim:
- type: string
- description: "Name of the claim from the JWT authentication token, Introspection Endpoint response or User Info Endpoint response which will be used to extract the user id. Defaults to `sub`."
- validIssuerUri:
- type: string
- description: URI of the token issuer used for authentication.
- validTokenType:
- type: string
- description: "Valid value for the `token_type` attribute returned by the Introspection Endpoint. No default value, and not checked by default."
- required:
- - type
- description: Authentication configuration for this listener.
- configuration:
- type: object
- properties:
- brokerCertChainAndKey:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
type: object
properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
+ effect:
type: string
- description: The name of the file certificate in the Secret.
key:
type: string
- description: The name of the private key in the Secret.
- required:
- - secretName
- - certificate
- - key
- description: Reference to the `Secret` which holds the certificate and private key pair which will be used for this listener. The certificate can optionally contain the whole chain. This field can be used only with listeners with enabled TLS encryption.
- class:
- type: string
- description: "Configures a specific class for `Ingress` and `LoadBalancer` that defines which controller will be used. This field can only be used with `ingress` and `loadbalancer` type listeners. If not specified, the default controller is used. For an `ingress` listener, set the `ingressClassName` property in the `Ingress` resources. For a `loadbalancer` listener, set the `loadBalancerClass` property in the `Service` resources."
- externalTrafficPolicy:
- type: string
- enum:
- - Local
- - Cluster
- description: "Specifies whether the service routes external traffic to node-local or cluster-wide endpoints. `Cluster` may cause a second hop to another node and obscures the client source IP. `Local` avoids a second hop for LoadBalancer and Nodeport type services and preserves the client source IP (when supported by the infrastructure). If unspecified, Kubernetes will use `Cluster` as the default.This field can be used only with `loadbalancer` or `nodeport` type listener."
- loadBalancerSourceRanges:
- type: array
- items:
- type: string
- description: "A list of CIDR ranges (for example `10.0.0.0/8` or `130.211.204.1/32`) from which clients can connect to load balancer type listeners. If supported by the platform, traffic through the loadbalancer is restricted to the specified CIDR ranges. This field is applicable only for loadbalancer type services and is ignored if the cloud provider does not support the feature. This field can be used only with `loadbalancer` type listener."
- bootstrap:
- type: object
- properties:
- alternativeNames:
- type: array
- items:
- type: string
- description: Additional alternative names for the bootstrap service. The alternative names will be added to the list of subject alternative names of the TLS certificates.
- host:
- type: string
- description: The bootstrap host. This field will be used in the Ingress resource or in the Route resource to specify the desired hostname. This field can be used only with `route` (optional) or `ingress` (required) type listeners.
- nodePort:
- type: integer
- description: Node port for the bootstrap service. This field can be used only with `nodeport` type listener.
- loadBalancerIP:
+ operator:
type: string
- description: The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the `loadBalancerIP` when a load balancer is created. This field is ignored if the cloud provider does not support the feature.This field can be used only with `loadbalancer` type listener.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: "Annotations that will be added to the `Ingress`, `Route`, or `Service` resource. You can use this field to configure DNS providers such as External DNS. This field can be used only with `loadbalancer`, `nodeport`, `route`, or `ingress` type listeners."
- labels:
- additionalProperties:
- type: string
- type: object
- description: "Labels that will be added to the `Ingress`, `Route`, or `Service` resource. This field can be used only with `loadbalancer`, `nodeport`, `route`, or `ingress` type listeners."
- externalIPs:
- type: array
- items:
- type: string
- description: External IPs associated to the nodeport service. These IPs are used by clients external to the Kubernetes cluster to access the Kafka brokers. This field is helpful when `nodeport` without `externalIP` is not sufficient. For example on bare-metal Kubernetes clusters that do not support Loadbalancer service types. This field can only be used with `nodeport` type listener.
- description: Bootstrap configuration.
- brokers:
- type: array
- items:
- type: object
- properties:
- broker:
- type: integer
- description: ID of the kafka broker (broker identifier). Broker IDs start from 0 and correspond to the number of broker replicas.
- advertisedHost:
- type: string
- description: The host name used in the brokers' `advertised.listeners`.
- advertisedPort:
- type: integer
- description: The port number used in the brokers' `advertised.listeners`.
- host:
- type: string
- description: The broker host. This field will be used in the Ingress resource or in the Route resource to specify the desired hostname. This field can be used only with `route` (optional) or `ingress` (required) type listeners.
- nodePort:
- type: integer
- description: Node port for the per-broker service. This field can be used only with `nodeport` type listener.
- loadBalancerIP:
- type: string
- description: The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the `loadBalancerIP` when a load balancer is created. This field is ignored if the cloud provider does not support the feature.This field can be used only with `loadbalancer` type listener.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: "Annotations that will be added to the `Ingress` or `Service` resource. You can use this field to configure DNS providers such as External DNS. This field can be used only with `loadbalancer`, `nodeport`, or `ingress` type listeners."
- labels:
- additionalProperties:
- type: string
- type: object
- description: "Labels that will be added to the `Ingress`, `Route`, or `Service` resource. This field can be used only with `loadbalancer`, `nodeport`, `route`, or `ingress` type listeners."
- externalIPs:
- type: array
- items:
- type: string
- description: External IPs associated to the nodeport service. These IPs are used by clients external to the Kubernetes cluster to access the Kafka brokers. This field is helpful when `nodeport` without `externalIP` is not sufficient. For example on bare-metal Kubernetes clusters that do not support Loadbalancer service types. This field can only be used with `nodeport` type listener.
- required:
- - broker
- description: Per-broker configurations.
- ipFamilyPolicy:
- type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
- type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- createBootstrapService:
- type: boolean
- description: Whether to create the bootstrap service or not. The bootstrap service is created by default (if not specified differently). This field can be used with the `loadBalancer` type listener.
- finalizers:
- type: array
- items:
- type: string
- description: "A list of finalizers which will be configured for the `LoadBalancer` type Services created for this listener. If supported by the platform, the finalizer `service.kubernetes.io/load-balancer-cleanup` to make sure that the external load balancer is deleted together with the service.For more information, see https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#garbage-collecting-load-balancers. This field can be used only with `loadbalancer` type listeners."
- useServiceDnsDomain:
- type: boolean
- description: "Configures whether the Kubernetes service DNS domain should be used or not. If set to `true`, the generated addresses will contain the service DNS domain suffix (by default `.cluster.local`, can be configured using environment variable `KUBERNETES_SERVICE_DNS_DOMAIN`). Defaults to `false`.This field can be used only with `internal` and `cluster-ip` type listeners."
- maxConnections:
- type: integer
- description: The maximum number of connections we allow for this listener in the broker at any time. New connections are blocked if the limit is reached.
- maxConnectionCreationRate:
- type: integer
- description: The maximum connection creation rate we allow in this listener at any time. New connections will be throttled if the limit is reached.
- preferredNodePortAddressType:
- type: string
- enum:
- - ExternalIP
- - ExternalDNS
- - InternalIP
- - InternalDNS
- - Hostname
- description: |-
- Defines which address type should be used as the node address. Available types are: `ExternalDNS`, `ExternalIP`, `InternalDNS`, `InternalIP` and `Hostname`. By default, the addresses will be used in the following order (the first one found will be used):
-
- * `ExternalDNS`
- * `ExternalIP`
- * `InternalDNS`
- * `InternalIP`
- * `Hostname`
-
- This field is used to select the preferred address type, which is checked first. If no address is found for this address type, the other types are checked in the default order. This field can only be used with `nodeport` type listener.
- publishNotReadyAddresses:
- type: boolean
- description: Configures whether the service endpoints are considered "ready" even if the Pods themselves are not. Defaults to `false`. This field can not be used with `internal` type listeners.
- description: Additional listener configuration.
- networkPolicyPeers:
- type: array
- items:
- type: object
- properties:
- ipBlock:
- type: object
- properties:
- cidr:
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ description: The pod's tolerations.
+ topologySpreadConstraints:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
type: string
- except:
- type: array
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
+ type: string
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
+ description: Secret to use populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- matchLabels:
- additionalProperties:
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
type: string
- type: object
- podSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
type: object
properties:
- key:
+ amount:
type: string
- operator:
+ format:
type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
type: string
- type: object
- description: "List of peers which should be able to connect to this listener. Peers in this list are combined using a logical OR operation. If this field is empty or missing, all connections will be allowed for this listener. If this field is present and contains at least one item, the listener only allows the traffic which matches at least one item in this list."
- required:
- - name
- - port
- - type
- - tls
- description: Configures listeners of Kafka brokers.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "Kafka broker config properties with the following prefixes cannot be set: listeners, advertised., broker., listener., host.name, port, inter.broker.listener.name, sasl., ssl., security., password., log.dir, zookeeper.connect, zookeeper.set.acl, zookeeper.ssl, zookeeper.clientCnxnSocket, authorizer., super.user, cruise.control.metrics.topic, cruise.control.metrics.reporter.bootstrap.servers, node.id, process.roles, controller., metadata.log.dir, zookeeper.metadata.migration.enable, client.quota.callback.static.kafka.admin., client.quota.callback.static.produce, client.quota.callback.static.fetch, client.quota.callback.static.storage.per.volume.limit.min.available., client.quota.callback.static.excluded.principal.name.list (with the exception of: zookeeper.connection.timeout.ms, sasl.server.max.receive.size, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols, ssl.secure.random.implementation, cruise.control.metrics.topic.num.partitions, cruise.control.metrics.topic.replication.factor, cruise.control.metrics.topic.retention.ms, cruise.control.metrics.topic.auto.create.retries, cruise.control.metrics.topic.auto.create.timeout.ms, cruise.control.metrics.topic.min.insync.replicas, controller.quorum.election.backoff.max.ms, controller.quorum.election.timeout.ms, controller.quorum.fetch.timeout.ms)."
- storage:
- type: object
- properties:
- class:
- type: string
- description: The storage class to use for dynamic volume allocation.
- deleteClaim:
- type: boolean
- description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
- id:
- type: integer
- minimum: 0
- description: Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'.
- kraftMetadata:
- type: string
- enum:
- - shared
- description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
- overrides:
- type: array
- items:
- type: object
- properties:
- class:
- type: string
- description: The storage class to use for dynamic volume allocation for this broker.
- broker:
- type: integer
- description: Id of the kafka broker (broker identifier).
- description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers.
- selector:
- additionalProperties:
- type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for ZooKeeper `Pods`.
+ clientService:
type: object
- description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
- size:
- type: string
- description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
- sizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
- type:
- type: string
- enum:
- - ephemeral
- - persistent-claim
- - jbod
- description: "Storage type, must be either 'ephemeral', 'persistent-claim', or 'jbod'."
- volumes:
- type: array
- items:
- type: object
- properties:
- class:
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
type: string
- description: The storage class to use for dynamic volume allocation.
- deleteClaim:
- type: boolean
- description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
- id:
- type: integer
- minimum: 0
- description: Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration.
- kraftMetadata:
+ enum:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for ZooKeeper client `Service`.
+ nodesService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
type: string
enum:
- - shared
- description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
- overrides:
- type: array
- items:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for ZooKeeper nodes `Service`.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for all ZooKeeper `PersistentVolumeClaims`.
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
+ description: Template for ZooKeeper `PodDisruptionBudget`.
+ zookeeperContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
type: object
properties:
- class:
+ localhostProfile:
type: string
- description: The storage class to use for dynamic volume allocation for this broker.
- broker:
- type: integer
- description: Id of the kafka broker (broker identifier).
- description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers.
- selector:
- additionalProperties:
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
type: object
- description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
- size:
- type: string
- description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
- sizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
- type:
- type: string
- enum:
- - ephemeral
- - persistent-claim
- description: "Storage type, must be either 'ephemeral' or 'persistent-claim'."
- required:
- - type
- description: List of volumes as Storage objects representing the JBOD disks array.
- required:
- - type
- description: Storage configuration (disk). Cannot be updated. This property is required when node pools are not used.
- authorization:
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the ZooKeeper container.
+ serviceAccount:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the ZooKeeper service account.
+ jmxSecret:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Secret of the Zookeeper Cluster JMX authentication.
+ description: Template for ZooKeeper cluster resources. The template allows users to specify how the Kubernetes resources are generated.
+ required:
+ - replicas
+ - storage
+ description: Configuration of the ZooKeeper cluster. This section is required when running a ZooKeeper-based Apache Kafka cluster.
+ entityOperator:
+ type: object
+ properties:
+ topicOperator:
type: object
properties:
- allowOnError:
- type: boolean
- description: "Defines whether a Kafka client should be allowed or denied by default when the authorizer fails to query the Open Policy Agent, for example, when it is temporarily unavailable). Defaults to `false` - all actions will be denied."
- authorizerClass:
- type: string
- description: "Authorization implementation class, which must be available in classpath."
- clientId:
- type: string
- description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- connectTimeoutSeconds:
- type: integer
- minimum: 1
- description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
- delegateToKafkaAcls:
- type: boolean
- description: Whether authorization decision should be delegated to the 'Simple' authorizer if DENIED by Keycloak Authorization Services policies. Default value is `false`.
- disableTlsHostnameVerification:
- type: boolean
- description: Enable or disable TLS hostname verification. Default value is `false`.
- enableMetrics:
- type: boolean
- description: Enable or disable OAuth metrics. The default value is `false`.
- expireAfterMs:
- type: integer
- description: The expiration of the records kept in the local cache to avoid querying the Open Policy Agent for every request. Defines how often the cached authorization decisions are reloaded from the Open Policy Agent server. In milliseconds. Defaults to `3600000`.
- grantsAlwaysLatest:
- type: boolean
- description: "Controls whether the latest grants are fetched for a new session. When enabled, grants are retrieved from Keycloak and cached for the user. The default value is `false`."
- grantsGcPeriodSeconds:
- type: integer
- minimum: 1
- description: "The time, in seconds, between consecutive runs of a job that cleans stale grants from the cache. The default value is 300."
- grantsMaxIdleTimeSeconds:
- type: integer
- minimum: 1
- description: "The time, in seconds, after which an idle grant can be evicted from the cache. The default value is 300."
- grantsRefreshPeriodSeconds:
+ watchedNamespace:
+ type: string
+ description: The namespace the Topic Operator should watch.
+ image:
+ type: string
+ description: The image to use for the Topic Operator.
+ reconciliationIntervalSeconds:
type: integer
minimum: 0
- description: The time between two consecutive grants refresh runs in seconds. The default value is 60.
- grantsRefreshPoolSize:
- type: integer
- minimum: 1
- description: "The number of threads to use to refresh grants for active sessions. The more threads, the more parallelism, so the sooner the job completes. However, using more threads places a heavier load on the authorization server. The default value is 5."
- httpRetries:
+ description: Interval between periodic reconciliations in seconds. Ignored if reconciliationIntervalMs is set.
+ reconciliationIntervalMs:
type: integer
minimum: 0
- description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
- includeAcceptHeader:
- type: boolean
- description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
- initialCacheCapacity:
- type: integer
- description: Initial capacity of the local cache used by the authorizer to avoid querying the Open Policy Agent for every request Defaults to `5000`.
- maximumCacheSize:
+ description: Interval between periodic reconciliations in milliseconds.
+ zookeeperSessionTimeoutSeconds:
type: integer
- description: Maximum capacity of the local cache used by the authorizer to avoid querying the Open Policy Agent for every request. Defaults to `50000`.
- readTimeoutSeconds:
+ minimum: 0
+ description: Timeout for the ZooKeeper session.
+ startupProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod startup checking.
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ topicMetadataMaxAttempts:
type: integer
- minimum: 1
- description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
- superUsers:
- type: array
- items:
- type: string
- description: "List of super users, which are user principals with unlimited access rights."
- supportsAdminApi:
- type: boolean
- description: Indicates whether the custom authorizer supports the APIs for managing ACLs using the Kafka Admin API. Defaults to `false`.
- tlsTrustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
+ minimum: 0
+ description: The number of attempts at getting topic metadata.
+ logging:
+ type: object
+ properties:
+ loggers:
+ additionalProperties:
type: string
- description: The name of the file certificate in the secret.
- pattern:
+ type: object
+ description: A Map from logger name to logger level.
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ description: "Logging type, must be either 'inline' or 'external'."
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: '`ConfigMap` entry where the logging configuration is stored. '
+ required:
+ - type
+ description: Logging configuration.
+ jvmOptions:
+ type: object
+ properties:
+ "-XX":
+ additionalProperties:
type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection to the OAuth server.
- tokenEndpointUri:
- type: string
- description: Authorization server token endpoint URI.
- type:
- type: string
- enum:
- - simple
- - opa
- - keycloak
- - custom
- description: "Authorization type. Currently, the supported types are `simple`, `keycloak`, `opa` and `custom`. `simple` authorization type uses Kafka's built-in authorizer for authorization. `keycloak` authorization type uses Keycloak Authorization Services for authorization. `opa` authorization type uses Open Policy Agent based authorization.`custom` authorization type uses user-provided implementation for authorization."
- url:
- type: string
- example: http://opa:8181/v1/data/kafka/authz/allow
- description: The URL used to connect to the Open Policy Agent server. The URL has to include the policy which will be queried by the authorizer. This option is required.
- required:
- - type
- description: Authorization configuration for Kafka brokers.
- rack:
- type: object
- properties:
- topologyKey:
- type: string
- example: topology.kubernetes.io/zone
- description: "A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set a broker's `broker.rack` config, and the `client.rack` config for Kafka Connect or MirrorMaker 2."
- required:
- - topologyKey
- description: Configuration of the `broker.rack` broker config.
- brokerRackInitImage:
- type: string
- description: The image of the init container used for initializing the `broker.rack`.
- livenessProbe:
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xmx option to to the JVM.
+ "-Xms":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xms option to to the JVM.
+ gcLoggingEnabled:
+ type: boolean
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The system property name.
+ value:
+ type: string
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: JVM Options for pods.
+ description: Configuration of the Topic Operator.
+ userOperator:
type: object
properties:
- initialDelaySeconds:
+ watchedNamespace:
+ type: string
+ description: The namespace the User Operator should watch.
+ image:
+ type: string
+ description: The image to use for the User Operator.
+ reconciliationIntervalSeconds:
type: integer
minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
+ description: Interval between periodic reconciliations in seconds. Ignored if reconciliationIntervalMs is set.
+ reconciliationIntervalMs:
type: integer
minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
+ description: Interval between periodic reconciliations in milliseconds.
+ zookeeperSessionTimeoutSeconds:
type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- jvmOptions:
- type: object
- properties:
- "-XX":
- additionalProperties:
- type: string
- type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to to the JVM.
- "-Xms":
+ minimum: 0
+ description: Timeout for the ZooKeeper session.
+ secretPrefix:
type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to to the JVM.
- gcLoggingEnabled:
- type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The system property name.
- value:
- type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for pods.
- jmxOptions:
- type: object
- properties:
- authentication:
+ description: The prefix that will be added to the KafkaUser name to be used as the Secret name.
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ logging:
type: object
properties:
+ loggers:
+ additionalProperties:
+ type: string
+ type: object
+ description: A Map from logger name to logger level.
type:
type: string
enum:
- - password
- description: Authentication type. Currently the only supported types are `password`.`password` type creates a username and protected port with no TLS.
+ - inline
+ - external
+ description: "Logging type, must be either 'inline' or 'external'."
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: '`ConfigMap` entry where the logging configuration is stored. '
required:
- type
- description: Authentication configuration for connecting to the JMX port.
- description: JMX Options for Kafka brokers.
- resources:
- type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
+ description: Logging configuration.
+ jvmOptions:
type: object
- description: CPU and memory resources to reserve.
- metricsConfig:
+ properties:
+ "-XX":
+ additionalProperties:
+ type: string
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xmx option to to the JVM.
+ "-Xms":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xms option to to the JVM.
+ gcLoggingEnabled:
+ type: boolean
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The system property name.
+ value:
+ type: string
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: JVM Options for pods.
+ description: Configuration of the User Operator.
+ tlsSidecar:
type: object
properties:
- type:
+ image:
type: string
- enum:
- - jmxPrometheusExporter
- description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
- valueFrom:
+ description: The docker image for the container.
+ resources:
type: object
properties:
- configMapKeyRef:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
- required:
- - type
- - valueFrom
- description: Metrics configuration.
- logging:
- type: object
- properties:
- loggers:
- additionalProperties:
- type: string
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ livenessProbe:
type: object
- description: A Map from logger name to logger level.
- type:
- type: string
- enum:
- - inline
- - external
- description: "Logging type, must be either 'inline' or 'external'."
- valueFrom:
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
type: object
properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: '`ConfigMap` entry where the logging configuration is stored. '
- required:
- - type
- description: Logging configuration for Kafka.
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ logLevel:
+ type: string
+ enum:
+ - emerg
+ - alert
+ - crit
+ - err
+ - warning
+ - notice
+ - info
+ - debug
+ description: The log level for the TLS sidecar. Default value is `notice`.
+ description: TLS sidecar configuration.
template:
type: object
properties:
- statefulset:
+ deployment:
type: object
properties:
metadata:
@@ -7641,13 +4687,13 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- podManagementPolicy:
+ deploymentStrategy:
type: string
enum:
- - OrderedReady
- - Parallel
- description: PodManagementPolicy which will be used for this StatefulSet. Valid values are `Parallel` and `OrderedReady`. Defaults to `Parallel`.
- description: Template for Kafka `StatefulSet`.
+ - RollingUpdate
+ - Recreate
+ description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
+ description: Template for Entity Operator `Deployment`.
pod:
type: object
properties:
@@ -8151,223 +5197,189 @@ spec:
tmpDirSizeLimit:
type: string
pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Kafka `Pods`.
- bootstrapService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- ipFamilyPolicy:
- type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
- type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for Kafka bootstrap `Service`.
- brokersService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- ipFamilyPolicy:
- type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
type: array
items:
- type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for Kafka broker `Service`.
- externalBootstrapService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ type: object
+ properties:
+ name:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka external bootstrap `Service`.
- perPodService:
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Entity Operator `Pods`.
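A hedged sketch of the pod-level template fields defined above; the volume and ConfigMap names are illustrative, and each entry under `volumes` must set exactly one of `secret`, `configMap`, `emptyDir`, or `persistentVolumeClaim`:

    spec:
      entityOperator:
        template:
          pod:
            tmpDirSizeLimit: 100Mi              # memory-backed /tmp, so keep this small
            volumes:
              - name: extra-config              # required volume name
                configMap:
                  name: entity-operator-extra   # illustrative ConfigMap
              - name: scratch
                emptyDir:
                  medium: Memory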
+ topicOperatorContainer:
type: object
properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ description: The environment variable key.
+ value:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka per-pod `Services` used for access from outside of Kubernetes.
- externalBootstrapRoute:
- type: object
- properties:
- metadata:
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka external bootstrap `Route`.
- perPodRoute:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka per-pod `Routes` used for access from outside of OpenShift.
- externalBootstrapIngress:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka external bootstrap `Ingress`.
- perPodIngress:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ mountPropagation:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes.
- persistentVolumeClaim:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ name:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for all Kafka `PersistentVolumeClaims`.
- podDisruptionBudget:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ subPath:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ subPathExpr:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
- maxUnavailable:
- type: integer
- minimum: 0
- description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
- description: Template for Kafka `PodDisruptionBudget`.
- kafkaContainer:
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Entity Topic Operator container.
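A sketch, under the schema above, of container-level customization for the Topic Operator container; the environment variable, volume name, and mount path are illustrative, and the referenced volume would be declared under `template.pod.volumes`:

    spec:
      entityOperator:
        template:
          topicOperatorContainer:
            env:
              - name: JAVA_OPTS                # illustrative variable
                value: "-Xms256m -Xmx512m"
            volumeMounts:
              - name: extra-config             # must match a volume declared in template.pod.volumes
                mountPath: /opt/extra-config   # illustrative mount path
                readOnly: true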
+ userOperatorContainer:
type: object
properties:
env:
@@ -8447,8 +5459,28 @@ spec:
runAsUserName:
type: string
description: Security context for the container.
- description: Template for the Kafka broker container.
- initContainer:
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Entity User Operator container.
+ tlsSidecarContainer:
type: object
properties:
env:
@@ -8528,25 +5560,27 @@ spec:
runAsUserName:
type: string
description: Security context for the container.
- description: Template for the Kafka init container.
- clusterCaCert:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ mountPropagation:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Secret with Kafka Cluster certificate public key.
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Entity Operator TLS sidecar container.
serviceAccount:
type: object
properties:
@@ -8564,8 +5598,8 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- description: Template for the Kafka service account.
- jmxSecret:
+ description: Template for the Entity Operator service account.
+ entityOperatorRole:
type: object
properties:
metadata:
@@ -8582,8 +5616,8 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- description: Template for Secret of the Kafka Cluster JMX authentication.
- clusterRoleBinding:
+ description: Template for the Entity Operator Role.
+ topicOperatorRoleBinding:
type: object
properties:
metadata:
@@ -8600,8 +5634,8 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- description: Template for the Kafka ClusterRoleBinding.
- podSet:
+ description: Template for the Entity Topic Operator RoleBinding.
+ userOperatorRoleBinding:
type: object
properties:
metadata:
@@ -8618,144 +5652,184 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- description: Template for Kafka `StrimziPodSet` resource.
- description: Template for Kafka cluster resources. The template allows users to specify how the Kubernetes resources are generated.
- tieredStorage:
- type: object
- properties:
- remoteStorageManager:
- type: object
- properties:
- className:
- type: string
- description: The class name for the `RemoteStorageManager` implementation.
- classPath:
- type: string
- description: The class path for the `RemoteStorageManager` implementation.
- config:
- additionalProperties:
- type: string
- type: object
- description: "The additional configuration map for the `RemoteStorageManager` implementation. Keys will be automatically prefixed with `rsm.config.`, and added to Kafka broker configuration."
- description: Configuration for the Remote Storage Manager.
- type:
- type: string
- enum:
- - custom
- description: "Storage type, only 'custom' is supported at the moment."
- required:
- - type
- description: Configure the tiered storage feature for Kafka brokers.
- quotas:
- type: object
- properties:
- consumerByteRate:
- type: integer
- minimum: 0
- description: "A per-broker byte-rate quota for clients consuming from a broker, independent of their number. If clients consume at maximum speed, the quota is shared equally between all non-excluded consumers. Otherwise, the quota is divided based on each client's consumption rate."
- controllerMutationRate:
- type: number
- minimum: 0
- description: "The default client quota on the rate at which mutations are accepted per second for create topic requests, create partition requests, and delete topic requests, defined for each broker. The mutations rate is measured by the number of partitions created or deleted. Applied on a per-broker basis."
- excludedPrincipals:
- type: array
- items:
- type: string
- description: "List of principals that are excluded from the quota. The principals have to be prefixed with `User:`, for example `User:my-user;User:CN=my-other-user`."
- minAvailableBytesPerVolume:
- type: integer
- minimum: 0
- description: Stop message production if the available size (in bytes) of the storage is lower than or equal to this specified value. This condition is mutually exclusive with `minAvailableRatioPerVolume`.
- minAvailableRatioPerVolume:
- type: number
- minimum: 0
- maximum: 1
- description: Stop message production if the percentage of available storage space falls below or equals the specified ratio (set as a decimal representing a percentage). This condition is mutually exclusive with `minAvailableBytesPerVolume`.
- producerByteRate:
- type: integer
- minimum: 0
- description: "A per-broker byte-rate quota for clients producing to a broker, independent of their number. If clients produce at maximum speed, the quota is shared equally between all non-excluded producers. Otherwise, the quota is divided based on each client's production rate."
- requestPercentage:
- type: integer
- minimum: 0
- description: The default client quota limits the maximum CPU utilization of each client as a percentage of the network and I/O threads of each broker. Applied on a per-broker basis.
- type:
- type: string
- enum:
- - kafka
- - strimzi
- description: "Quotas plugin type. Currently, the supported types are `kafka` and `strimzi`. `kafka` quotas type uses Kafka's built-in quotas plugin. `strimzi` quotas type uses Strimzi quotas plugin."
- required:
- - type
- description: "Quotas plugin configuration for Kafka brokers allows setting quotas for disk usage, produce/fetch rates, and more. Supported plugin types include `kafka` (default) and `strimzi`. If not specified, the default `kafka` quotas plugin is used."
- required:
- - listeners
- description: Configuration of the Kafka cluster.
- zookeeper:
+ description: Template for the Entity User Operator RoleBinding.
+ description: Template for Entity Operator resources. The template allows users to specify how a `Deployment` and `Pod` is generated.
+ description: Configuration of the Entity Operator.
+ clusterCa:
type: object
properties:
- replicas:
+ generateCertificateAuthority:
+ type: boolean
+ description: If true then Certificate Authority certificates will be generated automatically. Otherwise the user will need to provide a Secret with the CA certificate. Default is true.
+ generateSecretOwnerReference:
+ type: boolean
+ description: "If `true`, the Cluster and Client CA Secrets are configured with the `ownerReference` set to the `Kafka` resource. If the `Kafka` resource is deleted when `true`, the CA Secrets are also deleted. If `false`, the `ownerReference` is disabled. If the `Kafka` resource is deleted when `false`, the CA Secrets are retained and available for reuse. Default is `true`."
+ validityDays:
type: integer
minimum: 1
- description: The number of pods in the cluster.
+ description: The number of days generated certificates should be valid for. The default is 365.
+ renewalDays:
+ type: integer
+ minimum: 1
+ description: "The number of days in the certificate renewal period. This is the number of days before the a certificate expires during which renewal actions may be performed. When `generateCertificateAuthority` is true, this will cause the generation of a new certificate. When `generateCertificateAuthority` is true, this will cause extra logging at WARN level about the pending certificate expiry. Default is 30."
+ certificateExpirationPolicy:
+ type: string
+ enum:
+ - renew-certificate
+ - replace-key
+ description: How should CA certificate expiration be handled when `generateCertificateAuthority=true`. The default is for a new CA certificate to be generated reusing the existing private key.
+ description: Configuration of the cluster certificate authority.
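For illustration, a sketch of the `clusterCa` settings described above; the numbers are arbitrary examples, not recommendations:

    spec:
      clusterCa:
        generateCertificateAuthority: true               # operator-managed CA (the default)
        validityDays: 730                                # certificates valid for two years
        renewalDays: 60                                  # start renewal actions 60 days before expiry
        certificateExpirationPolicy: renew-certificate   # or replace-key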
+ clientsCa:
+ type: object
+ properties:
+ generateCertificateAuthority:
+ type: boolean
+ description: If true then Certificate Authority certificates will be generated automatically. Otherwise the user will need to provide a Secret with the CA certificate. Default is true.
+ generateSecretOwnerReference:
+ type: boolean
+ description: "If `true`, the Cluster and Client CA Secrets are configured with the `ownerReference` set to the `Kafka` resource. If the `Kafka` resource is deleted when `true`, the CA Secrets are also deleted. If `false`, the `ownerReference` is disabled. If the `Kafka` resource is deleted when `false`, the CA Secrets are retained and available for reuse. Default is `true`."
+ validityDays:
+ type: integer
+ minimum: 1
+ description: The number of days generated certificates should be valid for. The default is 365.
+ renewalDays:
+ type: integer
+ minimum: 1
+ description: "The number of days in the certificate renewal period. This is the number of days before the a certificate expires during which renewal actions may be performed. When `generateCertificateAuthority` is true, this will cause the generation of a new certificate. When `generateCertificateAuthority` is true, this will cause extra logging at WARN level about the pending certificate expiry. Default is 30."
+ certificateExpirationPolicy:
+ type: string
+ enum:
+ - renew-certificate
+ - replace-key
+ description: How should CA certificate expiration be handled when `generateCertificateAuthority=true`. The default is for a new CA certificate to be generated reusing the existing private key.
+ description: Configuration of the clients certificate authority.
+ cruiseControl:
+ type: object
+ properties:
image:
type: string
- description: "The container image used for ZooKeeper pods. If no image name is explicitly specified, it is determined based on the Kafka version set in `spec.kafka.version`. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
- storage:
+ description: "The container image used for Cruise Control pods. If no image name is explicitly specified, the image name corresponds to the name specified in the Cluster Operator configuration. If an image name is not defined in the Cluster Operator configuration, a default value is used."
+ tlsSidecar:
type: object
properties:
- class:
+ image:
type: string
- description: The storage class to use for dynamic volume allocation.
- deleteClaim:
- type: boolean
- description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
- id:
- type: integer
- minimum: 0
- description: Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration.
- kraftMetadata:
+ description: The docker image for the container.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ logLevel:
type: string
enum:
- - shared
- description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
- overrides:
+ - emerg
+ - alert
+ - crit
+ - err
+ - warning
+ - notice
+ - info
+ - debug
+ description: The log level for the TLS sidecar. Default value is `notice`.
+ description: TLS sidecar configuration.
+ resources:
+ type: object
+ properties:
+ claims:
type: array
items:
type: object
properties:
- class:
+ name:
type: string
- description: The storage class to use for dynamic volume allocation for this broker.
- broker:
- type: integer
- description: Id of the kafka broker (broker identifier).
- description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers.
- selector:
+ limits:
additionalProperties:
- type: string
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
type: object
- description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
- size:
- type: string
- description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
- sizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
- type:
- type: string
- enum:
- - ephemeral
- - persistent-claim
- description: "Storage type, must be either 'ephemeral' or 'persistent-claim'."
- required:
- - type
- description: Storage configuration (disk). Cannot be updated.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The ZooKeeper broker config. Properties with the following prefixes cannot be set: server., dataDir, dataLogDir, clientPort, authProvider, quorum.auth, requireClientAuthScheme, snapshot.trust.empty, standaloneEnabled, reconfigEnabled, 4lw.commands.whitelist, secureClientPort, ssl., serverCnxnFactory, sslQuorum (with the exception of: ssl.protocol, ssl.quorum.protocol, ssl.enabledProtocols, ssl.quorum.enabledProtocols, ssl.ciphersuites, ssl.quorum.ciphersuites, ssl.hostnameVerification, ssl.quorum.hostnameVerification)."
+ description: CPU and memory resources to reserve for the Cruise Control container.
livenessProbe:
type: object
properties:
@@ -8779,7 +5853,7 @@ spec:
type: integer
minimum: 1
description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
+ description: Pod liveness checking for the Cruise Control container.
readinessProbe:
type: object
properties:
@@ -8803,7 +5877,7 @@ spec:
type: integer
minimum: 1
description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
+ description: Pod readiness checking for the Cruise Control container.
jvmOptions:
type: object
properties:
@@ -8835,75 +5909,7 @@ spec:
type: string
description: The system property value.
description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for pods.
- jmxOptions:
- type: object
- properties:
- authentication:
- type: object
- properties:
- type:
- type: string
- enum:
- - password
- description: Authentication type. Currently the only supported types are `password`.`password` type creates a username and protected port with no TLS.
- required:
- - type
- description: Authentication configuration for connecting to the JMX port.
- description: JMX Options for Zookeeper nodes.
- resources:
- type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve.
- metricsConfig:
- type: object
- properties:
- type:
- type: string
- enum:
- - jmxPrometheusExporter
- description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
- required:
- - type
- - valueFrom
- description: Metrics configuration.
+ description: JVM Options for the Cruise Control container.
logging:
type: object
properties:
@@ -8934,11 +5940,11 @@ spec:
description: '`ConfigMap` entry where the logging configuration is stored. '
required:
- type
- description: Logging configuration for ZooKeeper.
+ description: Logging configuration (Log4j 2) for Cruise Control.
template:
type: object
properties:
- statefulset:
+ deployment:
type: object
properties:
metadata:
@@ -8955,31 +5961,13 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- podManagementPolicy:
+ deploymentStrategy:
type: string
enum:
- - OrderedReady
- - Parallel
- description: PodManagementPolicy which will be used for this StatefulSet. Valid values are `Parallel` and `OrderedReady`. Defaults to `Parallel`.
- description: Template for ZooKeeper `StatefulSet`.
- podSet:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for ZooKeeper `StrimziPodSet` resource.
+ - RollingUpdate
+ - Recreate
+ description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
+ description: Template for Cruise Control `Deployment`.
pod:
type: object
properties:
@@ -9083,7 +6071,144 @@ spec:
items:
type: object
properties:
- preference:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
type: object
properties:
matchExpressions:
@@ -9099,27 +6224,19 @@ spec:
type: array
items:
type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: object
- properties:
- nodeSelectorTerms:
- type: array
- items:
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
type: object
properties:
matchExpressions:
@@ -9135,20 +6252,17 @@ spec:
type: array
items:
type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
@@ -9274,251 +6388,645 @@ spec:
type: string
topologyKey:
type: string
- podAntiAffinity:
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ description: The pod's tolerations.
+ topologySpreadConstraints:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
+ type: string
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Cruise Control `Pods`.
+ apiService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
+ type: string
+ enum:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for Cruise Control API `Service`.
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
+ description: Template for Cruise Control `PodDisruptionBudget`.
+ cruiseControlContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
type: object
properties:
- preferredDuringSchedulingIgnoredDuringExecution:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
type: array
items:
- type: object
- properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
+ type: string
+ drop:
type: array
items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- mismatchLabelKeys:
- type: array
- items:
- type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
- type: string
- topologyKey:
- type: string
- description: The pod's affinity rules.
- tolerations:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Cruise Control container.
+ tlsSidecarContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
type: array
items:
type: object
properties:
- effect:
+ mountPath:
type: string
- key:
+ mountPropagation:
type: string
- operator:
+ name:
type: string
- tolerationSeconds:
- type: integer
- value:
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
- type: string
- maxSkew:
- type: integer
- minDomains:
- type: integer
- nodeAffinityPolicy:
+ subPath:
type: string
- nodeTaintsPolicy:
+ subPathExpr:
type: string
- topologyKey:
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Cruise Control TLS sidecar container.
+ serviceAccount:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
type: string
- whenUnsatisfiable:
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Cruise Control service account.
+ description: "Template to specify how Cruise Control resources, `Deployments` and `Pods`, are generated."
+ brokerCapacity:
+ type: object
+ properties:
+ disk:
+ type: string
+ pattern: "^[0-9]+([.][0-9]*)?([KMGTPE]i?|e[0-9]+)?$"
+ description: "Broker capacity for disk in bytes. Use a number value with either standard Kubernetes byte units (K, M, G, or T), their bibyte (power of two) equivalents (Ki, Mi, Gi, or Ti), or a byte value with or without E notation. For example, 100000M, 100000Mi, 104857600000, or 1e+11."
+ cpuUtilization:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: Broker capacity for CPU resource utilization as a percentage (0 - 100).
+ cpu:
+ type: string
+ pattern: "^[0-9]+([.][0-9]{0,3}|[m]?)$"
+ description: "Broker capacity for CPU resource in cores or millicores. For example, 1, 1.500, 1500m. For more information on valid CPU resource units see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu."
+ inboundNetwork:
+ type: string
+ pattern: "^[0-9]+([KMG]i?)?B/s$"
+ description: "Broker capacity for inbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
+ outboundNetwork:
+ type: string
+ pattern: "^[0-9]+([KMG]i?)?B/s$"
+ description: "Broker capacity for outbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
+ overrides:
+ type: array
+ items:
+ type: object
+ properties:
+ brokers:
+ type: array
+ items:
+ type: integer
+ description: List of Kafka brokers (broker identifiers).
+ cpu:
+ type: string
+ pattern: "^[0-9]+([.][0-9]{0,3}|[m]?)$"
+ description: "Broker capacity for CPU resource in cores or millicores. For example, 1, 1.500, 1500m. For more information on valid CPU resource units see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu."
+ inboundNetwork:
+ type: string
+ pattern: "^[0-9]+([KMG]i?)?B/s$"
+ description: "Broker capacity for inbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
+ outboundNetwork:
+ type: string
+ pattern: "^[0-9]+([KMG]i?)?B/s$"
+ description: "Broker capacity for outbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
+ required:
+ - brokers
+ description: Overrides for individual brokers. The `overrides` property lets you specify a different capacity configuration for different brokers.
+ description: The Cruise Control `brokerCapacity` configuration.
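A sketch of a `brokerCapacity` block matching the schema above, including a per-broker override; the broker IDs and capacity values are illustrative and must match the documented patterns (for example `KiB/s` units for network throughput):

    spec:
      cruiseControl:
        brokerCapacity:
          cpu: "1"
          inboundNetwork: 10000KiB/s
          outboundNetwork: 10000KiB/s
          overrides:
            - brokers: [0, 1]                # illustrative broker IDs
              cpu: "2"
              inboundNetwork: 20000KiB/s
              outboundNetwork: 20000KiB/s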
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Cruise Control configuration. For a full list of configuration options refer to https://github.com/linkedin/cruise-control/wiki/Configurations. Note that properties with the following prefixes cannot be set: bootstrap.servers, client.id, zookeeper., network., security., failed.brokers.zk.path,webserver.http., webserver.api.urlprefix, webserver.session.path, webserver.accesslog., two.step., request.reason.required,metric.reporter.sampler.bootstrap.servers, capacity.config.file, self.healing., ssl., kafka.broker.failure.detection.enable, topic.config.provider.class (with the exception of: ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols, webserver.http.cors.enabled, webserver.http.cors.origin, webserver.http.cors.exposeheaders, webserver.security.enable, webserver.ssl.enable)."
+ metricsConfig:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - jmxPrometheusExporter
+ description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
+ required:
+ - type
+ - valueFrom
+ description: Metrics configuration.
+ apiUsers:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - hashLoginService
+ description: "Type of the Cruise Control API users configuration. Supported format is: `hashLoginService`."
+ valueFrom:
+ type: object
+ properties:
+ secretKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Selects a key of a Secret in the resource's namespace.
+ description: Secret from which the custom Cruise Control API authentication credentials are read.
+ required:
+ - type
+ - valueFrom
+ description: Configuration of the Cruise Control REST API users.
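A sketch of the `apiUsers` configuration described above, assuming a pre-existing Secret that holds credentials in the `hashLoginService` format; the Secret and key names are illustrative:

    spec:
      cruiseControl:
        apiUsers:
          type: hashLoginService
          valueFrom:
            secretKeyRef:
              name: cruise-control-api-users         # illustrative Secret name
              key: cruise-control-auth.credentials   # illustrative key within the Secret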
+ description: Configuration for Cruise Control deployment. Deploys a Cruise Control instance when specified.
+ jmxTrans:
+ type: object
+ properties:
+ image:
+ type: string
+ description: The image to use for the JmxTrans.
+ outputDefinitions:
+ type: array
+ items:
+ type: object
+ properties:
+ outputType:
+ type: string
+ description: "Template for setting the format of the data that will be pushed.For more information see https://github.com/jmxtrans/jmxtrans/wiki/OutputWriters[JmxTrans OutputWriters]."
+ host:
+ type: string
+ description: The DNS/hostname of the remote host that the data is pushed to.
+ port:
+ type: integer
+ description: The port of the remote host that the data is pushed to.
+ flushDelayInSeconds:
+ type: integer
+ description: How many seconds the JmxTrans waits before pushing a new set of data out.
+ typeNames:
+ type: array
+ items:
type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
- type: object
- properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
+ description: "Template for filtering data to be included in response to a wildcard query. For more information see https://github.com/jmxtrans/jmxtrans/wiki/Queries[JmxTrans queries]."
+ name:
+ type: string
+ description: Template for setting the name of the output definition. This is used to identify where the results of queries should be sent.
+ required:
+ - outputType
+ - name
+ description: "Defines the output hosts that will be referenced later on. For more information on these properties see, xref:type-JmxTransOutputDefinitionTemplate-reference[`JmxTransOutputDefinitionTemplate` schema reference]."
+ logLevel:
+ type: string
+ description: "Sets the logging level of the JmxTrans deployment.For more information see, https://github.com/jmxtrans/jmxtrans-agent/wiki/Troubleshooting[JmxTrans Logging Level]."
+ kafkaQueries:
+ type: array
+ items:
+ type: object
+ properties:
+ targetMBean:
+ type: string
+ description: If using wildcards instead of a specific MBean then the data is gathered from multiple MBeans. Otherwise if specifying an MBean then data is gathered from that specified MBean.
+ attributes:
+ type: array
+ items:
type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for ZooKeeper `Pods`.
- clientService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- ipFamilyPolicy:
+ description: Determine which attributes of the targeted MBean should be included.
+ outputs:
+ type: array
+ items:
type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
+ description: "List of the names of output definitions specified in the spec.kafka.jmxTrans.outputDefinitions that have defined where JMX metrics are pushed to, and in which data format."
+ required:
+ - targetMBean
+ - attributes
+ - outputs
+ description: "Queries to send to the Kafka brokers to define what data should be read from each broker. For more information on these properties see, xref:type-JmxTransQueryTemplate-reference[`JmxTransQueryTemplate` schema reference]."
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for ZooKeeper client `Service`.
- nodesService:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ template:
+ type: object
+ properties:
+ deployment:
type: object
properties:
metadata:
@@ -9535,23 +7043,14 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- ipFamilyPolicy:
+ deploymentStrategy:
type: string
enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
- type: string
- enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for ZooKeeper nodes `Service`.
- persistentVolumeClaim:
+ - RollingUpdate
+ - Recreate
+ description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
+ description: Template for JmxTrans `Deployment`.
+ pod:
type: object
properties:
metadata:
@@ -9568,49 +7067,17 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- description: Template for all ZooKeeper `PersistentVolumeClaims`.
- podDisruptionBudget:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
- maxUnavailable:
- type: integer
- minimum: 0
- description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
- description: Template for ZooKeeper `PodDisruptionBudget`.
- zookeeperContainer:
- type: object
- properties:
- env:
+ imagePullSecrets:
type: array
items:
type: object
properties:
name:
type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
+ description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
securityContext:
type: object
properties:
- allowPrivilegeEscalation:
- type: boolean
appArmorProfile:
type: object
properties:
@@ -9618,549 +7085,781 @@ spec:
type: string
type:
type: string
- capabilities:
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Configures pod-level security attributes and common container settings.
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
type: object
properties:
- add:
+ preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
- type: string
- drop:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
type: array
items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
type: object
properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the ZooKeeper container.
- serviceAccount:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ key:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the ZooKeeper service account.
- jmxSecret:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ operator:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ tolerationSeconds:
+ type: integer
+ value:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Secret of the Zookeeper Cluster JMX authentication.
- description: Template for ZooKeeper cluster resources. The template allows users to specify how the Kubernetes resources are generated.
- required:
- - replicas
- - storage
- description: Configuration of the ZooKeeper cluster. This section is required when running a ZooKeeper-based Apache Kafka cluster.
- entityOperator:
- type: object
- properties:
- topicOperator:
- type: object
- properties:
- watchedNamespace:
- type: string
- description: The namespace the Topic Operator should watch.
- image:
- type: string
- description: The image to use for the Topic Operator.
- reconciliationIntervalSeconds:
- type: integer
- minimum: 0
- description: Interval between periodic reconciliations in seconds. Ignored if reconciliationIntervalMs is set.
- reconciliationIntervalMs:
- type: integer
- minimum: 0
- description: Interval between periodic reconciliations in milliseconds.
- zookeeperSessionTimeoutSeconds:
- type: integer
- minimum: 0
- description: Timeout for the ZooKeeper session.
- startupProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod startup checking.
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- resources:
- type: object
- properties:
- claims:
+ description: The pod's tolerations.
+ topologySpreadConstraints:
type: array
items:
type: object
properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve.
- topicMetadataMaxAttempts:
- type: integer
- minimum: 0
- description: The number of attempts at getting topic metadata.
- logging:
- type: object
- properties:
- loggers:
- additionalProperties:
- type: string
- type: object
- description: A Map from logger name to logger level.
- type:
- type: string
- enum:
- - inline
- - external
- description: "Logging type, must be either 'inline' or 'external'."
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: '`ConfigMap` entry where the logging configuration is stored. '
- required:
- - type
- description: Logging configuration.
- jvmOptions:
- type: object
- properties:
- "-XX":
- additionalProperties:
- type: string
- type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to to the JVM.
- "-Xms":
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to to the JVM.
- gcLoggingEnabled:
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
type: array
items:
type: object
properties:
name:
type: string
- description: The system property name.
- value:
- type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for pods.
- description: Configuration of the Topic Operator.
- userOperator:
- type: object
- properties:
- watchedNamespace:
- type: string
- description: The namespace the User Operator should watch.
- image:
- type: string
- description: The image to use for the User Operator.
- reconciliationIntervalSeconds:
- type: integer
- minimum: 0
- description: Interval between periodic reconciliations in seconds. Ignored if reconciliationIntervalMs is set.
- reconciliationIntervalMs:
- type: integer
- minimum: 0
- description: Interval between periodic reconciliations in milliseconds.
- zookeeperSessionTimeoutSeconds:
- type: integer
- minimum: 0
- description: Timeout for the ZooKeeper session.
- secretPrefix:
- type: string
- description: The prefix that will be added to the KafkaUser name to be used as the Secret name.
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- resources:
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for JmxTrans `Pods`.
+ container:
type: object
properties:
- claims:
+ env:
type: array
items:
type: object
properties:
name:
type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve.
- logging:
- type: object
- properties:
- loggers:
- additionalProperties:
- type: string
- type: object
- description: A Map from logger name to logger level.
- type:
- type: string
- enum:
- - inline
- - external
- description: "Logging type, must be either 'inline' or 'external'."
- valueFrom:
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
type: object
properties:
- configMapKeyRef:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
type: object
properties:
- key:
+ level:
type: string
- name:
+ role:
type: string
- optional:
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: '`ConfigMap` entry where the logging configuration is stored. '
- required:
- - type
- description: Logging configuration.
- jvmOptions:
- type: object
- properties:
- "-XX":
- additionalProperties:
- type: string
- type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to to the JVM.
- "-Xms":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to to the JVM.
- gcLoggingEnabled:
- type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
type: array
items:
type: object
properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
name:
type: string
- description: The system property name.
- value:
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for pods.
- description: Configuration of the User Operator.
- tlsSidecar:
- type: object
- properties:
- image:
- type: string
- description: The docker image for the container.
- resources:
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for JmxTrans container.
+ serviceAccount:
type: object
properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
+ metadata:
type: object
- description: CPU and memory resources to reserve.
- livenessProbe:
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the JmxTrans service account.
+ description: Template for JmxTrans resources.
+ required:
+ - outputDefinitions
+ - kafkaQueries
+ description: "As of Strimzi 0.35.0, JMXTrans is not supported anymore and this option is ignored."
+ kafkaExporter:
+ type: object
+ properties:
+ image:
+ type: string
+ description: "The container image used for the Kafka Exporter pods. If no image name is explicitly specified, the image name corresponds to the version specified in the Cluster Operator configuration. If an image name is not defined in the Cluster Operator configuration, a default value is used."
+ groupRegex:
+ type: string
+ description: Regular expression to specify which consumer groups to collect. Default value is `.*`.
+ topicRegex:
+ type: string
+ description: Regular expression to specify which topics to collect. Default value is `.*`.
+ groupExcludeRegex:
+ type: string
+ description: Regular expression to specify which consumer groups to exclude.
+ topicExcludeRegex:
+ type: string
+ description: Regular expression to specify which topics to exclude.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- logLevel:
- type: string
- enum:
- - emerg
- - alert
- - crit
- - err
- - warning
- - notice
- - info
- - debug
- description: The log level for the TLS sidecar. Default value is `notice`.
- description: TLS sidecar configuration.
+ description: CPU and memory resources to reserve.
+ logging:
+ type: string
+ description: "Only log messages with the given severity or above. Valid levels: [`info`, `debug`, `trace`]. Default log level is `info`."
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness check.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness check.
+ enableSaramaLogging:
+ type: boolean
+ description: "Enable Sarama logging, a Go client library used by the Kafka Exporter."
+ showAllOffsets:
+ type: boolean
+ description: "Whether show the offset/lag for all consumer group, otherwise, only show connected consumer groups."
template:
type: object
properties:
@@ -10187,7 +7886,7 @@ spec:
- RollingUpdate
- Recreate
description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
- description: Template for Entity Operator `Deployment`.
+ description: Template for Kafka Exporter `Deployment`.
pod:
type: object
properties:
@@ -10669,898 +8368,1225 @@ spec:
description: The pod's topology spread constraints.
priorityClassName:
type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Kafka Exporter `Pods`.
+ service:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka Exporter `Service`.
+ container:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka Exporter container.
+ serviceAccount:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka Exporter service account.
+ description: Customization of deployment templates and pods.
+ description: "Configuration of the Kafka Exporter. Kafka Exporter can provide additional metrics, for example lag of consumer group at topic/partition."
+ maintenanceTimeWindows:
+ type: array
+ items:
+ type: string
+ description: "A list of time windows for maintenance tasks (that is, certificates renewal). Each time window is defined by a cron expression."
+ required:
+ - kafka
+ description: "The specification of the Kafka and ZooKeeper clusters, and Topic Operator."
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ listeners:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: The name of the listener.
+ name:
+ type: string
+ description: The name of the listener.
+ addresses:
+ type: array
+ items:
+ type: object
+ properties:
+ host:
+ type: string
+ description: The DNS name or IP address of the Kafka bootstrap service.
+ port:
+ type: integer
+ description: The port of the Kafka bootstrap service.
+ description: A list of the addresses for this listener.
+ bootstrapServers:
+ type: string
+ description: A comma-separated list of `host:port` pairs for connecting to the Kafka cluster using this listener.
+ certificates:
+ type: array
+ items:
+ type: string
+ description: A list of TLS certificates which can be used to verify the identity of the server when connecting to the given listener. Set only for `tls` and `external` listeners.
+ description: Addresses of the internal and external listeners.
+ kafkaNodePools:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The name of the KafkaNodePool used by this Kafka resource.
+ description: List of the KafkaNodePools used by this Kafka cluster.
+ registeredNodeIds:
+ type: array
+ items:
+ type: integer
+ description: Registered node IDs used by this Kafka cluster. This field is used for internal purposes only and will be removed in the future.
+ clusterId:
+ type: string
+ description: Kafka cluster Id.
+ operatorLastSuccessfulVersion:
+ type: string
+ description: The version of the Strimzi Cluster Operator which performed the last successful reconciliation.
+ kafkaVersion:
+ type: string
+ description: The version of Kafka currently deployed in the cluster.
+ kafkaMetadataVersion:
+ type: string
+ description: The KRaft metadata.version currently used by the Kafka cluster.
+ kafkaMetadataState:
+ type: string
+ enum:
+ - ZooKeeper
+ - KRaftMigration
+ - KRaftDualWriting
+ - KRaftPostMigration
+ - PreKRaft
+ - KRaft
+ description: "Defines where cluster metadata are stored. Possible values are: ZooKeeper if the metadata are stored in ZooKeeper; KRaftMigration if the controllers are connected to ZooKeeper, brokers are being rolled with Zookeeper migration enabled and connection information to controllers, and the metadata migration process is running; KRaftDualWriting if the metadata migration process finished and the cluster is in dual-write mode; KRaftPostMigration if the brokers are fully KRaft-based but controllers being rolled to disconnect from ZooKeeper; PreKRaft if brokers and controller are fully KRaft-based, metadata are stored in KRaft, but ZooKeeper must be deleted; KRaft if the metadata are stored in KRaft."
+ description: "The status of the Kafka and ZooKeeper clusters, and Topic Operator."
+
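+# A minimal usage sketch (illustrative only; not part of the generated schema): the
+# `kafkaExporter` and `maintenanceTimeWindows` fields described above are set on a
+# `Kafka` custom resource of the group/version defined by this CRD. The cluster name
+# `my-cluster` and the cron expression below are hypothetical examples.
+#
+#   apiVersion: kafka.strimzi.io/v1beta2
+#   kind: Kafka
+#   metadata:
+#     name: my-cluster
+#   spec:
+#     kafka:
+#       # ... listeners, storage, and other required broker configuration ...
+#     kafkaExporter:
+#       groupRegex: ".*"
+#       topicRegex: ".*"
+#       showAllOffsets: true
+#       enableSaramaLogging: false
+#     maintenanceTimeWindows:
+#       - "* * 0-1 ? * SUN *"   # Quartz-style cron expression: early Sunday mornings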
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkamirrormakers.kafka.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: kafka.strimzi.io
+ names:
+ kind: KafkaMirrorMaker
+ listKind: KafkaMirrorMakerList
+ singular: kafkamirrormaker
+ plural: kafkamirrormakers
+ shortNames:
+ - kmm
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ scale:
+ specReplicasPath: .spec.replicas
+ statusReplicasPath: .status.replicas
+ labelSelectorPath: .status.labelSelector
+ additionalPrinterColumns:
+ - name: Desired replicas
+ description: The desired number of Kafka MirrorMaker replicas
+ jsonPath: .spec.replicas
+ type: integer
+ - name: Consumer Bootstrap Servers
+ description: The bootstrap servers for the consumer
+ jsonPath: .spec.consumer.bootstrapServers
+ type: string
+ priority: 1
+ - name: Producer Bootstrap Servers
+ description: The bootstrap servers for the producer
+ jsonPath: .spec.producer.bootstrapServers
+ type: string
+ priority: 1
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ version:
+ type: string
+ description: The Kafka MirrorMaker version. Defaults to the latest version. Consult the documentation to understand the process required to upgrade or downgrade the version.
+ replicas:
+ type: integer
+ minimum: 0
+ description: The number of pods in the `Deployment`.
+ image:
+ type: string
+ description: "The container image used for Kafka MirrorMaker pods. If no image name is explicitly specified, it is determined based on the `spec.version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
+ consumer:
+ type: object
+ properties:
+ numStreams:
+ type: integer
+ minimum: 1
+ description: Specifies the number of consumer stream threads to create.
+ offsetCommitInterval:
+ type: integer
+ description: Specifies the offset auto-commit interval in ms. Default value is 60000.
+ bootstrapServers:
+ type: string
+ description: A list of host:port pairs for establishing the initial connection to the Kafka cluster.
+ groupId:
+ type: string
+ description: A unique string that identifies the consumer group this consumer belongs to.
+ authentication:
+ type: object
+ properties:
+ accessToken:
+ type: object
+ properties:
+ key:
type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
- type: object
- properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Entity Operator `Pods`.
- topicOperatorContainer:
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
+ accessTokenIsJwt:
+ type: boolean
+ description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
+ accessTokenLocation:
+ type: string
+ description: Path to the token file containing an access token to be used for authentication.
+ audience:
+ type: string
+ description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
+ certificateAndKey:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the certificate file in the Secret.
+ key:
+ type: string
+ description: The name of the private key in the Secret.
+ required:
+ - secretName
+ - certificate
+ - key
+ description: Reference to the `Secret` which holds the certificate and private key pair.
+ clientAssertion:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes secret containing the client assertion which was manually configured for the client.
+ clientAssertionLocation:
+ type: string
+ description: Path to the file containing the client assertion to be used for authentication.
+ clientAssertionType:
+ type: string
+ description: "The client assertion type. If not set, and either `clientAssertion` or `clientAssertionLocation` is configured, this value defaults to `urn:ietf:params:oauth:client-assertion-type:jwt-bearer`."
+ clientId:
+ type: string
+ description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ clientSecret:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ connectTimeoutSeconds:
+ type: integer
+ description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
+ disableTlsHostnameVerification:
+ type: boolean
+ description: Enable or disable TLS hostname verification. Default value is `false`.
+ enableMetrics:
+ type: boolean
+ description: Enable or disable OAuth metrics. Default value is `false`.
+ httpRetries:
+ type: integer
+ description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
+ httpRetryPauseMs:
+ type: integer
+ description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
+ includeAcceptHeader:
+ type: boolean
+ description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
+ maxTokenExpirySeconds:
+ type: integer
+ description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
+ passwordSecret:
type: object
properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Entity Topic Operator container.
- userOperatorContainer:
+ secretName:
+ type: string
+ description: The name of the Secret containing the password.
+ password:
+ type: string
+ description: The name of the key in the Secret under which the password is stored.
+ required:
+ - secretName
+ - password
+ description: Reference to the `Secret` which holds the password.
+ readTimeoutSeconds:
+ type: integer
+ description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
+ refreshToken:
type: object
properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Entity User Operator container.
- tlsSidecarContainer:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
+ saslExtensions:
+ additionalProperties:
+ type: string
+ type: object
+ description: SASL extensions parameters.
+ scope:
+ type: string
+ description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
+ tlsTrustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the certificate file in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection to the OAuth server.
+ tokenEndpointUri:
+ type: string
+ description: Authorization server token endpoint URI.
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-256
+ - scram-sha-512
+ - plain
+ - oauth
+ description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
+ username:
+ type: string
+ description: Username used for the authentication.
+ required:
+ - type
+ description: Authentication configuration for connecting to the cluster.
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the certificate file in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection.
+ description: TLS configuration for connecting MirrorMaker to the cluster.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The MirrorMaker consumer config. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, group.id, sasl., security., interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
+ required:
+ - bootstrapServers
+ - groupId
+ description: Configuration of source cluster.
+ producer:
+ type: object
+ properties:
+ bootstrapServers:
+ type: string
+ description: A list of host:port pairs for establishing the initial connection to the Kafka cluster.
+ abortOnSendFailure:
+ type: boolean
+ description: Flag to set the MirrorMaker to exit on a failed send. Default value is `true`.
+ authentication:
+ type: object
+ properties:
+ accessToken:
type: object
properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
- type: object
- properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Entity Operator TLS sidecar container.
- serviceAccount:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
+ accessTokenIsJwt:
+ type: boolean
+ description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
+ accessTokenLocation:
+ type: string
+ description: Path to the token file containing an access token to be used for authentication.
+ audience:
+ type: string
+ description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
+ certificateAndKey:
type: object
properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Entity Operator service account.
- entityOperatorRole:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the Secret.
+ key:
+ type: string
+ description: The name of the private key in the Secret.
+ required:
+ - secretName
+ - certificate
+ - key
+ description: Reference to the `Secret` which holds the certificate and private key pair.
+ clientAssertion:
type: object
properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Entity Operator Role.
- topicOperatorRoleBinding:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes secret containing the client assertion which was manually configured for the client.
+ clientAssertionLocation:
+ type: string
+ description: Path to the file containing the client assertion to be used for authentication.
+ clientAssertionType:
+ type: string
+ description: "The client assertion type. If not set, and either `clientAssertion` or `clientAssertionLocation` is configured, this value defaults to `urn:ietf:params:oauth:client-assertion-type:jwt-bearer`."
+ clientId:
+ type: string
+ description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ clientSecret:
type: object
properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Entity Topic Operator RoleBinding.
- userOperatorRoleBinding:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ connectTimeoutSeconds:
+ type: integer
+ description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
+ disableTlsHostnameVerification:
+ type: boolean
+ description: Enable or disable TLS hostname verification. Default value is `false`.
+ enableMetrics:
+ type: boolean
+ description: Enable or disable OAuth metrics. Default value is `false`.
+ httpRetries:
+ type: integer
+ description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
+ httpRetryPauseMs:
+ type: integer
+ description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
+ includeAcceptHeader:
+ type: boolean
+ description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
+ maxTokenExpirySeconds:
+ type: integer
+ description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
+ passwordSecret:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the password.
+ password:
+ type: string
+ description: The name of the key in the Secret under which the password is stored.
+ required:
+ - secretName
+ - password
+ description: Reference to the `Secret` which holds the password.
+ readTimeoutSeconds:
+ type: integer
+ description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
+ refreshToken:
type: object
properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Entity Topic Operator RoleBinding.
- description: Template for Entity Operator resources. The template allows users to specify how a `Deployment` and `Pod` is generated.
- description: Configuration of the Entity Operator.
- clusterCa:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
+ saslExtensions:
+ additionalProperties:
+ type: string
+ type: object
+ description: SASL extensions parameters.
+ scope:
+ type: string
+ description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
+ tlsTrustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection to the OAuth server.
+ tokenEndpointUri:
+ type: string
+ description: Authorization server token endpoint URI.
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-256
+ - scram-sha-512
+ - plain
+ - oauth
+ description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
+ username:
+ type: string
+ description: Username used for the authentication.
+ required:
+ - type
+ description: Authentication configuration for connecting to the cluster.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The MirrorMaker producer config. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, sasl., security., interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection.
+ description: TLS configuration for connecting MirrorMaker to the cluster.
+ required:
+ - bootstrapServers
+ description: Configuration of target cluster.
+ resources:
type: object
properties:
- generateCertificateAuthority:
- type: boolean
- description: If true then Certificate Authority certificates will be generated automatically. Otherwise the user will need to provide a Secret with the CA certificate. Default is true.
- generateSecretOwnerReference:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ whitelist:
+ type: string
+ description: "List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression `A\\|B`. Or, as a special case, you can mirror all topics using the regular expression `*`. You can also specify multiple regular expressions separated by commas."
+ include:
+ type: string
+ description: "List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression `A\\|B`. Or, as a special case, you can mirror all topics using the regular expression `*`. You can also specify multiple regular expressions separated by commas."
+ jvmOptions:
+ type: object
+ properties:
+ "-XX":
+ additionalProperties:
+ type: string
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xmx option to the JVM.
+ "-Xms":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xms option to the JVM.
+ gcLoggingEnabled:
type: boolean
- description: "If `true`, the Cluster and Client CA Secrets are configured with the `ownerReference` set to the `Kafka` resource. If the `Kafka` resource is deleted when `true`, the CA Secrets are also deleted. If `false`, the `ownerReference` is disabled. If the `Kafka` resource is deleted when `false`, the CA Secrets are retained and available for reuse. Default is `true`."
- validityDays:
- type: integer
- minimum: 1
- description: The number of days generated certificates should be valid for. The default is 365.
- renewalDays:
- type: integer
- minimum: 1
- description: "The number of days in the certificate renewal period. This is the number of days before the a certificate expires during which renewal actions may be performed. When `generateCertificateAuthority` is true, this will cause the generation of a new certificate. When `generateCertificateAuthority` is true, this will cause extra logging at WARN level about the pending certificate expiry. Default is 30."
- certificateExpirationPolicy:
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The system property name.
+ value:
+ type: string
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: JVM Options for pods.
+ logging:
+ type: object
+ properties:
+ loggers:
+ additionalProperties:
+ type: string
+ type: object
+ description: A Map from logger name to logger level.
+ type:
type: string
enum:
- - renew-certificate
- - replace-key
- description: How should CA certificate expiration be handled when `generateCertificateAuthority=true`. The default is for a new CA certificate to be generated reusing the existing private key.
- description: Configuration of the cluster certificate authority.
- clientsCa:
+ - inline
+ - external
+ description: "Logging type, must be either 'inline' or 'external'."
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: '`ConfigMap` entry where the logging configuration is stored. '
+ required:
+ - type
+ description: Logging configuration for MirrorMaker.
+ metricsConfig:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - jmxPrometheusExporter
+ description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
+ required:
+ - type
+ - valueFrom
+ description: Metrics configuration.
+ tracing:
type: object
properties:
- generateCertificateAuthority:
- type: boolean
- description: If true then Certificate Authority certificates will be generated automatically. Otherwise the user will need to provide a Secret with the CA certificate. Default is true.
- generateSecretOwnerReference:
- type: boolean
- description: "If `true`, the Cluster and Client CA Secrets are configured with the `ownerReference` set to the `Kafka` resource. If the `Kafka` resource is deleted when `true`, the CA Secrets are also deleted. If `false`, the `ownerReference` is disabled. If the `Kafka` resource is deleted when `false`, the CA Secrets are retained and available for reuse. Default is `true`."
- validityDays:
- type: integer
- minimum: 1
- description: The number of days generated certificates should be valid for. The default is 365.
- renewalDays:
- type: integer
- minimum: 1
- description: "The number of days in the certificate renewal period. This is the number of days before the a certificate expires during which renewal actions may be performed. When `generateCertificateAuthority` is true, this will cause the generation of a new certificate. When `generateCertificateAuthority` is true, this will cause extra logging at WARN level about the pending certificate expiry. Default is 30."
- certificateExpirationPolicy:
+ type:
type: string
enum:
- - renew-certificate
- - replace-key
- description: How should CA certificate expiration be handled when `generateCertificateAuthority=true`. The default is for a new CA certificate to be generated reusing the existing private key.
- description: Configuration of the clients certificate authority.
- cruiseControl:
+ - jaeger
+ - opentelemetry
+ description: "Type of the tracing used. Currently the only supported type is `opentelemetry` for OpenTelemetry tracing. As of Strimzi 0.37.0, `jaeger` type is not supported anymore and this option is ignored."
+ required:
+ - type
+ description: The configuration of tracing in Kafka MirrorMaker.
+ template:
type: object
properties:
- image:
- type: string
- description: "The container image used for Cruise Control pods. If no image name is explicitly specified, the image name corresponds to the name specified in the Cluster Operator configuration. If an image name is not defined in the Cluster Operator configuration, a default value is used."
- tlsSidecar:
+ deployment:
type: object
properties:
- image:
- type: string
- description: The docker image for the container.
- resources:
+ metadata:
type: object
properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
+ labels:
additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
+ type: string
type: object
- requests:
+ description: Labels added to the Kubernetes resource.
+ annotations:
additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
+ type: string
type: object
- description: CPU and memory resources to reserve.
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking.
- logLevel:
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ deploymentStrategy:
type: string
enum:
- - emerg
- - alert
- - crit
- - err
- - warning
- - notice
- - info
- - debug
- description: The log level for the TLS sidecar. Default value is `notice`.
- description: TLS sidecar configuration.
- resources:
- type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve for the Cruise Control container.
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness checking for the Cruise Control container.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness checking for the Cruise Control container.
- jvmOptions:
- type: object
- properties:
- "-XX":
- additionalProperties:
- type: string
- type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to to the JVM.
- "-Xms":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to to the JVM.
- gcLoggingEnabled:
- type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The system property name.
- value:
- type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for the Cruise Control container.
- logging:
+ - RollingUpdate
+ - Recreate
+ description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
+ description: Template for Kafka MirrorMaker `Deployment`.
+ pod:
type: object
properties:
- loggers:
- additionalProperties:
- type: string
+ metadata:
type: object
- description: A Map from logger name to logger level.
- type:
- type: string
- enum:
- - inline
- - external
- description: "Logging type, must be either 'inline' or 'external'."
- valueFrom:
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
+ securityContext:
type: object
properties:
- configMapKeyRef:
+ appArmorProfile:
type: object
properties:
- key:
+ localhostProfile:
type: string
- name:
+ type:
type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: '`ConfigMap` entry where the logging configuration is stored. '
- required:
- - type
- description: Logging configuration (Log4j 2) for Cruise Control.
- template:
- type: object
- properties:
- deployment:
- type: object
- properties:
- metadata:
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- deploymentStrategy:
- type: string
- enum:
- - RollingUpdate
- - Recreate
- description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
- description: Template for Cruise Control `Deployment`.
- pod:
- type: object
- properties:
- metadata:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- imagePullSecrets:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
type: array
items:
type: object
properties:
name:
type: string
- description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
- securityContext:
+ value:
+ type: string
+ windowsOptions:
type: object
properties:
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- fsGroup:
- type: integer
- fsGroupChangePolicy:
+ gmsaCredentialSpec:
type: string
- runAsGroup:
- type: integer
- runAsNonRoot:
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- supplementalGroups:
- type: array
- items:
- type: integer
- sysctls:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- value:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Configures pod-level security attributes and common container settings.
- terminationGracePeriodSeconds:
- type: integer
- minimum: 0
- description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
- affinity:
- type: object
- properties:
- nodeAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
- type: object
- properties:
- preference:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: object
- properties:
- nodeSelectorTerms:
- type: array
- items:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
+ runAsUserName:
+ type: string
+ description: Configures pod-level security attributes and common container settings.
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
type: object
properties:
- preferredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
type: array
items:
type: object
properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- mismatchLabelKeys:
- type: array
- items:
+ operator:
type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
type: object
properties:
labelSelector:
@@ -11617,76 +9643,76 @@ spec:
type: string
topologyKey:
type: string
- podAntiAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
type: object
properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- mismatchLabelKeys:
- type: array
- items:
+ operator:
type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
type: object
properties:
labelSelector:
@@ -11743,761 +9769,1781 @@ spec:
type: string
topologyKey:
type: string
- description: The pod's affinity rules.
- tolerations:
- type: array
- items:
- type: object
- properties:
- effect:
- type: string
- key:
- type: string
- operator:
- type: string
- tolerationSeconds:
- type: integer
- value:
- type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
- type: object
- properties:
- labelSelector:
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
type: object
properties:
- matchExpressions:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
type: array
items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
type: string
+ namespaceSelector:
type: object
- matchLabelKeys:
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ description: The pod's tolerations.
+ topologySpreadConstraints:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
type: array
items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
type: string
- maxSkew:
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
+ type: string
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
type: integer
- minDomains:
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
type: integer
- nodeAffinityPolicy:
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
type: string
- nodeTaintsPolicy:
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
type: string
- topologyKey:
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Kafka MirrorMaker `Pods`.
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
+ description: Template for Kafka MirrorMaker `PodDisruptionBudget`.
+ mirrorMakerContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
type: string
- whenUnsatisfiable:
+ drop:
+ type: array
+ items:
type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
+ privileged:
+ type: boolean
+ procMount:
type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
- type: object
- properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
+ readOnlyRootFilesystem:
type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for Kafka MirrorMaker container.
+ serviceAccount:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka MirrorMaker service account.
+ description: "Template to specify how Kafka MirrorMaker resources, `Deployments` and `Pods`, are generated."
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ oneOf:
+ - properties:
+ include: {}
+ required:
+ - include
+ - properties:
+ whitelist: {}
+ required:
+ - whitelist
+ required:
+ - replicas
+ - consumer
+ - producer
+ description: The specification of Kafka MirrorMaker.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ labelSelector:
+ type: string
+ description: Label selector for pods providing this resource.
+ replicas:
+ type: integer
+ description: The current number of pods being used to provide this resource.
+ description: The status of Kafka MirrorMaker.
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkausers.kafka.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: kafka.strimzi.io
+ names:
+ kind: KafkaUser
+ listKind: KafkaUserList
+ singular: kafkauser
+ plural: kafkausers
+ shortNames:
+ - ku
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Cluster
+ description: The name of the Kafka cluster this user belongs to
+ jsonPath: .metadata.labels.strimzi\.io/cluster
+ type: string
+ - name: Authentication
+ description: How the user is authenticated
+ jsonPath: .spec.authentication.type
+ type: string
+ - name: Authorization
+ description: How the user is authorised
+ jsonPath: .spec.authorization.type
+ type: string
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ password:
+ type: object
+ properties:
+ valueFrom:
+ type: object
+ properties:
+ secretKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Selects a key of a Secret in the resource's namespace.
+ description: Secret from which the password should be read.
+ required:
+ - valueFrom
+ description: "Specify the password for the user. If not set, a new password is generated by the User Operator."
+ type:
+ type: string
+ enum:
+ - tls
+ - tls-external
+ - scram-sha-512
+ description: Authentication type.
+ required:
+ - type
+ description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication."
+ authorization:
+ type: object
+ properties:
+ acls:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - allow
+ - deny
+ description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow user to execute the specified operations. Default value is `allow`.
+ resource:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern.
+ patternType:
+ type: string
+ enum:
+ - literal
+ - prefix
+ description: "Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`."
+ type:
+ type: string
+ enum:
+ - topic
+ - group
+ - cluster
+ - transactionalId
+ description: "Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`."
+ required:
+ - type
+ description: Indicates the resource for which given ACL rule applies.
+ host:
+ type: string
+ description: "The host from which the action described in the ACL rule is allowed or denied. If not set, it defaults to `*`, allowing or denying the action from any host."
+ operation:
+ type: string
+ enum:
+ - Read
+ - Write
+ - Create
+ - Delete
+ - Alter
+ - Describe
+ - ClusterAction
+ - AlterConfigs
+ - DescribeConfigs
+ - IdempotentWrite
+ - All
+ description: "Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
+ operations:
+ type: array
+ items:
type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Cruise Control `Pods`.
- apiService:
+ enum:
+ - Read
+ - Write
+ - Create
+ - Delete
+ - Alter
+ - Describe
+ - ClusterAction
+ - AlterConfigs
+ - DescribeConfigs
+ - IdempotentWrite
+ - All
+ description: "List of operations to allow or deny. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All. Only certain operations work with the specified resource."
+ required:
+ - resource
+ description: List of ACL rules which should be applied to this user.
+ type:
+ type: string
+ enum:
+ - simple
+ description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses the Kafka Admin API for managing the ACL rules.
+ required:
+ - acls
+ - type
+ description: Authorization rules for this Kafka user.
+ quotas:
+ type: object
+ properties:
+ producerByteRate:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis.
+ consumerByteRate:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis.
+ requestPercentage:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads.
+ controllerMutationRate:
+ type: number
+ minimum: 0
+ description: "A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted."
+ description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced. Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas.
+ template:
+ type: object
+ properties:
+ secret:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated.
+ description: Template to specify how Kafka User `Secrets` are generated.
+ description: The specification of the user.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ username:
+ type: string
+ description: Username.
+ secret:
+ type: string
+ description: The name of `Secret` where the credentials are stored.
+ description: The status of the Kafka User.
+ - name: v1beta1
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Cluster
+ description: The name of the Kafka cluster this user belongs to
+ jsonPath: .metadata.labels.strimzi\.io/cluster
+ type: string
+ - name: Authentication
+ description: How the user is authenticated
+ jsonPath: .spec.authentication.type
+ type: string
+ - name: Authorization
+ description: How the user is authorised
+ jsonPath: .spec.authorization.type
+ type: string
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ password:
+ type: object
+ properties:
+ valueFrom:
type: object
properties:
- metadata:
+ secretKeyRef:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- ipFamilyPolicy:
- type: string
- enum:
- - SingleStack
- - PreferDualStack
- - RequireDualStack
- description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
- ipFamilies:
- type: array
- items:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Selects a key of a Secret in the resource's namespace.
+ description: Secret from which the password should be read.
+ required:
+ - valueFrom
+ description: "Specify the password for the user. If not set, a new password is generated by the User Operator."
+ type:
+ type: string
+ enum:
+ - tls
+ - tls-external
+ - scram-sha-512
+ description: Authentication type.
+ required:
+ - type
+ description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication."
+ authorization:
+ type: object
+ properties:
+ acls:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - allow
+ - deny
+ description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow the user to execute the specified operations. Default value is `allow`.
+ resource:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern.
+ patternType:
type: string
enum:
- - IPv4
- - IPv6
- description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
- description: Template for Cruise Control API `Service`.
- podDisruptionBudget:
+ - literal
+ - prefix
+ description: "Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`."
+ type:
+ type: string
+ enum:
+ - topic
+ - group
+ - cluster
+ - transactionalId
+ description: "Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`."
+ required:
+ - type
+ description: Indicates the resource for which given ACL rule applies.
+ host:
+ type: string
+ description: "The host from which the action described in the ACL rule is allowed or denied. If not set, it defaults to `*`, allowing or denying the action from any host."
+ operation:
+ type: string
+ enum:
+ - Read
+ - Write
+ - Create
+ - Delete
+ - Alter
+ - Describe
+ - ClusterAction
+ - AlterConfigs
+ - DescribeConfigs
+ - IdempotentWrite
+ - All
+ description: "Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
+ operations:
+ type: array
+ items:
+ type: string
+ enum:
+ - Read
+ - Write
+ - Create
+ - Delete
+ - Alter
+ - Describe
+ - ClusterAction
+ - AlterConfigs
+ - DescribeConfigs
+ - IdempotentWrite
+ - All
+ description: "List of operations to allow or deny. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All. Only certain operations work with the specified resource."
+ required:
+ - resource
+ description: List of ACL rules which should be applied to this user.
+ type:
+ type: string
+ enum:
+ - simple
+ description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses the Kafka Admin API for managing the ACL rules.
+ required:
+ - acls
+ - type
+ description: Authorization rules for this Kafka user.
+ quotas:
+ type: object
+ properties:
+ producerByteRate:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis.
+ consumerByteRate:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis.
+ requestPercentage:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads.
+ controllerMutationRate:
+ type: number
+ minimum: 0
+ description: "A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted."
+ description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced. Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas.
+ template:
+ type: object
+ properties:
+ secret:
+ type: object
+ properties:
+ metadata:
type: object
properties:
- metadata:
+ labels:
+ additionalProperties:
+ type: string
type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
- maxUnavailable:
- type: integer
- minimum: 0
- description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
- description: Template for Cruise Control `PodDisruptionBudget`.
- cruiseControlContainer:
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated.
+ description: Template to specify how Kafka User `Secrets` are generated.
+ description: The specification of the user.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ username:
+ type: string
+ description: Username.
+ secret:
+ type: string
+ description: The name of `Secret` where the credentials are stored.
+ description: The status of the Kafka User.
+ - name: v1alpha1
+ served: true
+ storage: false
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Cluster
+ description: The name of the Kafka cluster this user belongs to
+ jsonPath: .metadata.labels.strimzi\.io/cluster
+ type: string
+ - name: Authentication
+ description: How the user is authenticated
+ jsonPath: .spec.authentication.type
+ type: string
+ - name: Authorization
+ description: How the user is authorised
+ jsonPath: .spec.authorization.type
+ type: string
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ password:
+ type: object
+ properties:
+ valueFrom:
type: object
properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
+ secretKeyRef:
type: object
properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
+ key:
type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
+ name:
+ type: string
+ optional:
type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Cruise Control container.
- tlsSidecarContainer:
+ description: Selects a key of a Secret in the resource's namespace.
+ description: Secret from which the password should be read.
+ required:
+ - valueFrom
+ description: "Specify the password for the user. If not set, a new password is generated by the User Operator."
+ type:
+ type: string
+ enum:
+ - tls
+ - tls-external
+ - scram-sha-512
+ description: Authentication type.
+ required:
+ - type
+ description: "Authentication mechanism enabled for this Kafka user. The supported authentication mechanisms are `scram-sha-512`, `tls`, and `tls-external`. \n\n* `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 credentials.\n* `tls` generates a secret with user certificate for mutual TLS authentication.\n* `tls-external` does not generate a user certificate. But prepares the user for using mutual TLS authentication using a user certificate generated outside the User Operator.\n ACLs and quotas set for this user are configured in the `CN=` format.\n\nAuthentication is optional. If authentication is not configured, no credentials are generated. ACLs and quotas set for the user are configured in the `` format suitable for SASL authentication."
+ authorization:
+ type: object
+ properties:
+ acls:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - allow
+ - deny
+ description: The type of the rule. Currently the only supported type is `allow`. ACL rules with type `allow` are used to allow the user to execute the specified operations. Default value is `allow`.
+ resource:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of resource for which given ACL rule applies. Can be combined with `patternType` field to use prefix pattern.
+ patternType:
+ type: string
+ enum:
+ - literal
+ - prefix
+ description: "Describes the pattern used in the resource field. The supported types are `literal` and `prefix`. With `literal` pattern type, the resource field will be used as a definition of a full name. With `prefix` pattern type, the resource name will be used only as a prefix. Default value is `literal`."
+ type:
+ type: string
+ enum:
+ - topic
+ - group
+ - cluster
+ - transactionalId
+ description: "Resource type. The available resource types are `topic`, `group`, `cluster`, and `transactionalId`."
+ required:
+ - type
+ description: Indicates the resource for which given ACL rule applies.
+ host:
+ type: string
+ description: "The host from which the action described in the ACL rule is allowed or denied. If not set, it defaults to `*`, allowing or denying the action from any host."
+ operation:
+ type: string
+ enum:
+ - Read
+ - Write
+ - Create
+ - Delete
+ - Alter
+ - Describe
+ - ClusterAction
+ - AlterConfigs
+ - DescribeConfigs
+ - IdempotentWrite
+ - All
+ description: "Operation which will be allowed or denied. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All."
+ operations:
+ type: array
+ items:
+ type: string
+ enum:
+ - Read
+ - Write
+ - Create
+ - Delete
+ - Alter
+ - Describe
+ - ClusterAction
+ - AlterConfigs
+ - DescribeConfigs
+ - IdempotentWrite
+ - All
+ description: "List of operations to allow or deny. Supported operations are: Read, Write, Create, Delete, Alter, Describe, ClusterAction, AlterConfigs, DescribeConfigs, IdempotentWrite and All. Only certain operations work with the specified resource."
+ required:
+ - resource
+ description: List of ACL rules which should be applied to this user.
+ type:
+ type: string
+ enum:
+ - simple
+ description: Authorization type. Currently the only supported type is `simple`. `simple` authorization type uses the Kafka Admin API for managing the ACL rules.
+ required:
+ - acls
+ - type
+ description: Authorization rules for this Kafka user.
+ quotas:
+ type: object
+ properties:
+ producerByteRate:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum bytes per-second that each client group can publish to a broker before the clients in the group are throttled. Defined on a per-broker basis.
+ consumerByteRate:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum bytes per-second that each client group can fetch from a broker before the clients in the group are throttled. Defined on a per-broker basis.
+ requestPercentage:
+ type: integer
+ minimum: 0
+ description: A quota on the maximum CPU utilization of each client group as a percentage of network and I/O threads.
+ controllerMutationRate:
+ type: number
+ minimum: 0
+ description: "A quota on the rate at which mutations are accepted for the create topics request, the create partitions request and the delete topics request. The rate is accumulated by the number of partitions created or deleted."
+ description: Quotas on requests to control the broker resources used by clients. Network bandwidth and request rate quotas can be enforced. Kafka documentation for Kafka User quotas can be found at http://kafka.apache.org/documentation/#design_quotas.
+ template:
+ type: object
+ properties:
+ secret:
+ type: object
+ properties:
+ metadata:
type: object
properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The environment variable key.
- value:
- type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for KafkaUser resources. The template allows users to specify how the `Secret` with password or TLS certificates is generated.
+ description: Template to specify how Kafka User `Secrets` are generated.
+ description: The specification of the user.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ username:
+ type: string
+ description: Username.
+ secret:
+ type: string
+ description: The name of `Secret` where the credentials are stored.
+ description: The status of the Kafka User.
+
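For reference, a minimal KafkaUser manifest exercising the schema above might look like the following sketch. The user, cluster, and topic names (`my-user`, `my-cluster`, `my-topic`) are illustrative rather than taken from these install files, and `kafka.strimzi.io/v1beta2` assumes the storage version this CRD declares.

apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
  name: my-user
  labels:
    strimzi.io/cluster: my-cluster    # the "Cluster" printer column reads this label
spec:
  authentication:
    type: scram-sha-512               # generates a Secret with SASL SCRAM-SHA-512 credentials
  authorization:
    type: simple                      # ACLs are managed through the Kafka Admin API
    acls:
      - resource:
          type: topic
          name: my-topic
          patternType: literal
        operations:
          - Read
          - Describe
        host: "*"
  quotas:
    producerByteRate: 1048576         # bytes per second, per broker
    consumerByteRate: 2097152
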
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: strimzi-cluster-operator-global
+ labels:
+ app: strimzi
+rules:
+ - apiGroups:
+ - "rbac.authorization.k8s.io"
+ resources:
+ # The cluster operator needs to create and manage cluster role bindings in the case of an install where a user
+ # has specified they want their cluster role bindings generated
+ - clusterrolebindings
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ # The cluster operator requires "get" permissions to view storage class details
+ # This is because only a persistent volume of a supported storage class type can be resized
+ - storageclasses
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ # The cluster operator requires "list" permissions to view all nodes in a cluster
+ # The listing is used to determine the node addresses when NodePort access is configured
+ # These addresses are then exposed in the custom resource states
+ - nodes
+ verbs:
+ - list
+
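The cluster-wide role above only takes effect once it is bound to the operator's ServiceAccount. The binding actually shipped with Strimzi is defined elsewhere in these install files; a sketch of what such a binding looks like, using an illustrative name, follows the same pattern as the ClusterRoleBinding further down.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: strimzi-cluster-operator-global-binding   # illustrative name, not the shipped one
  labels:
    app: strimzi
subjects:
  - kind: ServiceAccount
    name: strimzi-cluster-operator
    namespace: myproject
roleRef:
  kind: ClusterRole
  name: strimzi-cluster-operator-global
  apiGroup: rbac.authorization.k8s.io
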
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: strimzi-cluster-operator
+ labels:
+ app: strimzi
+
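The operator picks up the RBAC rules in this file only when its pods run under this ServiceAccount. The Deployment that does so is defined elsewhere in these install files; roughly, its pod template would carry a fragment like the sketch below (the image reference is a placeholder, not taken from this patch).

spec:
  template:
    spec:
      serviceAccountName: strimzi-cluster-operator   # ties the operator pods to the roles bound above
      containers:
        - name: strimzi-cluster-operator
          image: quay.io/strimzi/operator:latest     # placeholder tag
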
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: strimzi-cluster-operator-watched
+ labels:
+ app: strimzi
+rules:
+ # Resources in this role are being watched by the operator. When operator is deployed as cluster-wide, these permissions
+ # need to be granted to the operator on a cluster wide level as well, even if the operands will be deployed only in
+ # few of the namespaces in given cluster. This is required to set up the Kubernetes watches and informers.
+ # Note: The rights included in this role might change in the future
+ - apiGroups:
+ - ""
+ resources:
+ # The cluster operator needs to access and delete pods, this is to allow it to monitor pod health and coordinate rolling updates
+ - pods
+ verbs:
+ - watch
+ - list
+ - apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ # The Cluster Operator operates the Strimzi custom resources
+ - kafkas
+ - kafkanodepools
+ - kafkaconnects
+ - kafkaconnectors
+ - kafkamirrormakers
+ - kafkabridges
+ - kafkamirrormaker2s
+ - kafkarebalances
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - "kafka.strimzi.io"
+ resources:
+ # The Cluster Operator needs to manage the status of the Strimzi custom resources
+ - kafkas/status
+ - kafkanodepools/status
+ - kafkaconnects/status
+ - kafkaconnectors/status
+ - kafkamirrormakers/status
+ - kafkabridges/status
+ - kafkamirrormaker2s/status
+ - kafkarebalances/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - "core.strimzi.io"
+ resources:
+ # The cluster operator uses StrimziPodSets to manage the Kafka and ZooKeeper pods
+ - strimzipodsets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+ - apiGroups:
+ - "core.strimzi.io"
+ resources:
+ # The Cluster Operator needs to manage the status of the StrimziPodSet custom resource
+ - strimzipodsets/status
+ verbs:
+ - get
+ - patch
+ - update
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: strimzi-cluster-operator-watched
+ labels:
+ app: strimzi
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: myproject
+roleRef:
+ kind: ClusterRole
+ name: strimzi-cluster-operator-watched
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: strimzi-cluster-operator-kafka-broker-delegation
+ labels:
+ app: strimzi
+# The Kafka broker cluster role must be bound to the cluster operator service account so that it can delegate the cluster role to the Kafka brokers.
+# This must be done to avoid escalating privileges which would be blocked by Kubernetes.
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: myproject
+roleRef:
+ kind: ClusterRole
+ name: strimzi-kafka-broker
+ apiGroup: rbac.authorization.k8s.io
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkanodepools.kafka.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: kafka.strimzi.io
+ names:
+ kind: KafkaNodePool
+ listKind: KafkaNodePoolList
+ singular: kafkanodepool
+ plural: kafkanodepools
+ shortNames:
+ - knp
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ scale:
+ specReplicasPath: .spec.replicas
+ statusReplicasPath: .status.replicas
+ labelSelectorPath: .status.labelSelector
+ additionalPrinterColumns:
+ - name: Desired replicas
+ description: The desired number of replicas
+ jsonPath: .spec.replicas
+ type: integer
+ - name: Roles
+ description: Roles of the nodes in the pool
+ jsonPath: .status.roles
+ type: string
+ - name: NodeIds
+ description: Node IDs used by Kafka nodes in this pool
+ jsonPath: .status.nodeIds
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ replicas:
+ type: integer
+ minimum: 0
+ description: The number of pods in the pool.
+ storage:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation.
+ deleteClaim:
+ type: boolean
+ description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
+ id:
+ type: integer
+ minimum: 0
+ description: Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'.
+ kraftMetadata:
+ type: string
+ enum:
+ - shared
+ description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
+ overrides:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation for this broker.
+ broker:
+ type: integer
+ description: Id of the kafka broker (broker identifier).
+ description: Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers.
+ selector:
+ additionalProperties:
+ type: string
+ type: object
+ description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
+ size:
+ type: string
+ description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
+ sizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ - jbod
+ description: "Storage type, must be either 'ephemeral', 'persistent-claim', or 'jbod'."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The storage class to use for dynamic volume allocation.
+ deleteClaim:
+ type: boolean
+ description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
+ id:
+ type: integer
+ minimum: 0
+ description: Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration.
+ kraftMetadata:
+ type: string
+ enum:
+ - shared
+ description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
+ overrides:
+ type: array
+ items:
type: object
properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
+ class:
type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
+ description: The storage class to use for dynamic volume allocation for this broker.
+ broker:
type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Cruise Control TLS sidecar container.
- serviceAccount:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Cruise Control service account.
- description: "Template to specify how Cruise Control resources, `Deployments` and `Pods`, are generated."
- brokerCapacity:
- type: object
- properties:
- disk:
- type: string
- pattern: "^[0-9]+([.][0-9]*)?([KMGTPE]i?|e[0-9]+)?$"
- description: "Broker capacity for disk in bytes. Use a number value with either standard Kubernetes byte units (K, M, G, or T), their bibyte (power of two) equivalents (Ki, Mi, Gi, or Ti), or a byte value with or without E notation. For example, 100000M, 100000Mi, 104857600000, or 1e+11."
- cpuUtilization:
- type: integer
- minimum: 0
- maximum: 100
- description: Broker capacity for CPU resource utilization as a percentage (0 - 100).
- cpu:
- type: string
- pattern: "^[0-9]+([.][0-9]{0,3}|[m]?)$"
- description: "Broker capacity for CPU resource in cores or millicores. For example, 1, 1.500, 1500m. For more information on valid CPU resource units see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu."
- inboundNetwork:
- type: string
- pattern: "^[0-9]+([KMG]i?)?B/s$"
- description: "Broker capacity for inbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
- outboundNetwork:
- type: string
- pattern: "^[0-9]+([KMG]i?)?B/s$"
- description: "Broker capacity for outbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
- overrides:
- type: array
- items:
+ description: Id of the kafka broker (broker identifier).
+ description: Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers.
+ selector:
+ additionalProperties:
+ type: string
type: object
- properties:
- brokers:
- type: array
- items:
- type: integer
- description: List of Kafka brokers (broker identifiers).
- cpu:
- type: string
- pattern: "^[0-9]+([.][0-9]{0,3}|[m]?)$"
- description: "Broker capacity for CPU resource in cores or millicores. For example, 1, 1.500, 1500m. For more information on valid CPU resource units see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu."
- inboundNetwork:
- type: string
- pattern: "^[0-9]+([KMG]i?)?B/s$"
- description: "Broker capacity for inbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
- outboundNetwork:
- type: string
- pattern: "^[0-9]+([KMG]i?)?B/s$"
- description: "Broker capacity for outbound network throughput in bytes per second. Use an integer value with standard Kubernetes byte units (K, M, G) or their bibyte (power of two) equivalents (Ki, Mi, Gi) per second. For example, 10000KiB/s."
- required:
- - brokers
- description: Overrides for individual brokers. The `overrides` property lets you specify a different capacity configuration for different brokers.
- description: The Cruise Control `brokerCapacity` configuration.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Cruise Control configuration. For a full list of configuration options refer to https://github.com/linkedin/cruise-control/wiki/Configurations. Note that properties with the following prefixes cannot be set: bootstrap.servers, client.id, zookeeper., network., security., failed.brokers.zk.path,webserver.http., webserver.api.urlprefix, webserver.session.path, webserver.accesslog., two.step., request.reason.required,metric.reporter.sampler.bootstrap.servers, capacity.config.file, self.healing., ssl., kafka.broker.failure.detection.enable, topic.config.provider.class (with the exception of: ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols, webserver.http.cors.enabled, webserver.http.cors.origin, webserver.http.cors.exposeheaders, webserver.security.enable, webserver.ssl.enable)."
- metricsConfig:
- type: object
- properties:
- type:
- type: string
- enum:
- - jmxPrometheusExporter
- description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
- valueFrom:
- type: object
- properties:
- configMapKeyRef:
- type: object
- properties:
- key:
- type: string
- name:
- type: string
- optional:
- type: boolean
- description: Reference to the key in the ConfigMap containing the configuration.
- description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
- required:
- - type
- - valueFrom
- description: Metrics configuration.
- description: Configuration for Cruise Control deployment. Deploys a Cruise Control instance when specified.
- jmxTrans:
+ description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
+ size:
+ type: string
+ description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
+ sizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
+ type:
+ type: string
+ enum:
+ - ephemeral
+ - persistent-claim
+ description: "Storage type, must be either 'ephemeral' or 'persistent-claim'."
+ required:
+ - type
+ description: List of volumes as Storage objects representing the JBOD disks array.
+ required:
+ - type
+ description: Storage configuration (disk). Cannot be updated.
+ roles:
+ type: array
+ items:
+ type: string
+ enum:
+ - controller
+ - broker
+ description: "The roles that the nodes in this pool will have when KRaft mode is enabled. Supported values are 'broker' and 'controller'. This field is required. When KRaft mode is disabled, the only allowed value if `broker`."
+ resources:
type: object
properties:
- image:
- type: string
- description: The image to use for the JmxTrans.
- outputDefinitions:
+ claims:
type: array
items:
type: object
properties:
- outputType:
- type: string
- description: "Template for setting the format of the data that will be pushed.For more information see https://github.com/jmxtrans/jmxtrans/wiki/OutputWriters[JmxTrans OutputWriters]."
- host:
- type: string
- description: The DNS/hostname of the remote host that the data is pushed to.
- port:
- type: integer
- description: The port of the remote host that the data is pushed to.
- flushDelayInSeconds:
- type: integer
- description: How many seconds the JmxTrans waits before pushing a new set of data out.
- typeNames:
- type: array
- items:
- type: string
- description: "Template for filtering data to be included in response to a wildcard query. For more information see https://github.com/jmxtrans/jmxtrans/wiki/Queries[JmxTrans queries]."
name:
type: string
- description: Template for setting the name of the output definition. This is used to identify where to send the results of queries should be sent.
- required:
- - outputType
- - name
- description: "Defines the output hosts that will be referenced later on. For more information on these properties see, xref:type-JmxTransOutputDefinitionTemplate-reference[`JmxTransOutputDefinitionTemplate` schema reference]."
- logLevel:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ jvmOptions:
+ type: object
+ properties:
+ "-XX":
+ additionalProperties:
+ type: string
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
type: string
- description: "Sets the logging level of the JmxTrans deployment.For more information see, https://github.com/jmxtrans/jmxtrans-agent/wiki/Troubleshooting[JmxTrans Logging Level]."
- kafkaQueries:
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xmx option to the JVM.
+ "-Xms":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xms option to the JVM.
+ gcLoggingEnabled:
+ type: boolean
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
type: array
items:
type: object
properties:
- targetMBean:
+ name:
type: string
- description: If using wildcards instead of a specific MBean then the data is gathered from multiple MBeans. Otherwise if specifying an MBean then data is gathered from that specified MBean.
- attributes:
- type: array
- items:
- type: string
- description: Determine which attributes of the targeted MBean should be included.
- outputs:
- type: array
- items:
- type: string
- description: "List of the names of output definitions specified in the spec.kafka.jmxTrans.outputDefinitions that have defined where JMX metrics are pushed to, and in which data format."
- required:
- - targetMBean
- - attributes
- - outputs
- description: "Queries to send to the Kafka brokers to define what data should be read from each broker. For more information on these properties see, xref:type-JmxTransQueryTemplate-reference[`JmxTransQueryTemplate` schema reference]."
- resources:
+ description: The system property name.
+ value:
+ type: string
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: JVM Options for pods.
+ template:
+ type: object
+ properties:
+ podSet:
type: object
properties:
- claims:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka `StrimziPodSet` resource.
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ imagePullSecrets:
type: array
items:
type: object
properties:
name:
type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve.
- template:
- type: object
- properties:
- deployment:
+ description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
+ securityContext:
type: object
properties:
- metadata:
+ appArmorProfile:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- deploymentStrategy:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
type: string
- enum:
- - RollingUpdate
- - Recreate
- description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
- description: Template for JmxTrans `Deployment`.
- pod:
- type: object
- properties:
- metadata:
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- imagePullSecrets:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
type: array
items:
type: object
properties:
name:
type: string
- description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
- securityContext:
+ value:
+ type: string
+ windowsOptions:
type: object
properties:
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- fsGroup:
- type: integer
- fsGroupChangePolicy:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
type: string
- runAsGroup:
- type: integer
- runAsNonRoot:
+ hostProcess:
type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- supplementalGroups:
- type: array
- items:
- type: integer
- sysctls:
+ runAsUserName:
+ type: string
+ description: Configures pod-level security attributes and common container settings.
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
properties:
- name:
- type: string
- value:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Configures pod-level security attributes and common container settings.
- terminationGracePeriodSeconds:
- type: integer
- minimum: 0
- description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
- affinity:
- type: object
- properties:
- nodeAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ preference:
type: object
properties:
- preference:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: object
- properties:
- nodeSelectorTerms:
- type: array
- items:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
type: object
properties:
- preferredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
type: array
items:
type: object
properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- mismatchLabelKeys:
- type: array
- items:
+ operator:
type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
type: object
properties:
labelSelector:
@@ -12554,76 +11600,76 @@ spec:
type: string
topologyKey:
type: string
- podAntiAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
type: object
properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- mismatchLabelKeys:
- type: array
- items:
+ operator:
type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
type: object
properties:
labelSelector:
@@ -12680,562 +11726,1258 @@ spec:
type: string
topologyKey:
type: string
- description: The pod's affinity rules.
- tolerations:
- type: array
- items:
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ description: The pod's tolerations.
+ topologySpreadConstraints:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
+ type: string
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
type: object
properties:
- effect:
- type: string
- key:
- type: string
- operator:
- type: string
- tolerationSeconds:
+ defaultMode:
type: integer
- value:
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
+ description: Secret to use to populate the volume.
+ configMap:
type: object
properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
+ defaultMode:
+ type: integer
+ items:
type: array
items:
- type: string
- maxSkew:
- type: integer
- minDomains:
- type: integer
- nodeAffinityPolicy:
- type: string
- nodeTaintsPolicy:
- type: string
- topologyKey:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
type: string
- whenUnsatisfiable:
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
- type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
type: object
properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
+ claimName:
type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for JmxTrans `Pods`.
- container:
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Kafka `Pods`.
+ perPodService:
+ type: object
+ properties:
+ metadata:
type: object
properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka per-pod `Services` used for access from outside of Kubernetes.
+ perPodRoute:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka per-pod `Routes` used for access from outside of OpenShift.
+ perPodIngress:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for all Kafka `PersistentVolumeClaims`.
+ kafkaContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
type: string
- description: The environment variable key.
- value:
+ drop:
+ type: array
+ items:
type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
type: object
properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
+ level:
type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for JmxTrans container.
- serviceAccount:
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka broker container.
+ initContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
type: object
properties:
- metadata:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
type: object
properties:
- labels:
- additionalProperties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ drop:
+ type: array
+ items:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the JmxTrans service account.
- description: Template for JmxTrans resources.
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka init container.
+ description: Template for pool resources. The template allows users to specify how the resources belonging to this pool are generated.
+ required:
+ - replicas
+ - storage
+ - roles
+ description: The specification of the KafkaNodePool.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ nodeIds:
+ type: array
+ items:
+ type: integer
+ description: Node IDs used by Kafka nodes in this pool.
+ clusterId:
+ type: string
+ description: Kafka cluster ID.
+ roles:
+ type: array
+ items:
+ type: string
+ enum:
+ - controller
+ - broker
+ description: The roles currently assigned to this pool.
+ replicas:
+ type: integer
+ description: The current number of pods being used to provide this resource.
+ labelSelector:
+ type: string
+ description: Label selector for pods providing this resource.
+ description: The status of the KafkaNodePool.
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkabridges.kafka.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: kafka.strimzi.io
+ names:
+ kind: KafkaBridge
+ listKind: KafkaBridgeList
+ singular: kafkabridge
+ plural: kafkabridges
+ shortNames:
+ - kb
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ scale:
+ specReplicasPath: .spec.replicas
+ statusReplicasPath: .status.replicas
+ labelSelectorPath: .status.labelSelector
+ additionalPrinterColumns:
+ - name: Desired replicas
+ description: The desired number of Kafka Bridge replicas
+ jsonPath: .spec.replicas
+ type: integer
+ - name: Bootstrap Servers
+ description: The bootstrap servers
+ jsonPath: .spec.bootstrapServers
+ type: string
+ priority: 1
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ replicas:
+ type: integer
+ minimum: 0
+ description: The number of pods in the `Deployment`. Defaults to `1`.
+ image:
+ type: string
+ description: "The container image used for Kafka Bridge pods. If no image name is explicitly specified, the image name corresponds to the image specified in the Cluster Operator configuration. If an image name is not defined in the Cluster Operator configuration, a default value is used."
+ bootstrapServers:
+ type: string
+ description: A list of host:port pairs for establishing the initial connection to the Kafka cluster.
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection.
+ description: TLS configuration for connecting Kafka Bridge to the cluster.
+ authentication:
+ type: object
+ properties:
+ accessToken:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
+ accessTokenIsJwt:
+ type: boolean
+ description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
+ accessTokenLocation:
+ type: string
+ description: Path to the token file containing an access token to be used for authentication.
+ audience:
+ type: string
+ description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
+ certificateAndKey:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the Secret.
+ key:
+ type: string
+ description: The name of the private key in the Secret.
+ required:
+ - secretName
+ - certificate
+ - key
+ description: Reference to the `Secret` which holds the certificate and private key pair.
+ clientAssertion:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes secret containing the client assertion which was manually configured for the client.
+ clientAssertionLocation:
+ type: string
+ description: Path to the file containing the client assertion to be used for authentication.
+ clientAssertionType:
+ type: string
+ description: "The client assertion type. If not set, and either `clientAssertion` or `clientAssertionLocation` is configured, this value defaults to `urn:ietf:params:oauth:client-assertion-type:jwt-bearer`."
+ clientId:
+ type: string
+ description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ clientSecret:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ connectTimeoutSeconds:
+ type: integer
+ description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
+ disableTlsHostnameVerification:
+ type: boolean
+ description: Enable or disable TLS hostname verification. Default value is `false`.
+ enableMetrics:
+ type: boolean
+ description: Enable or disable OAuth metrics. Default value is `false`.
+ httpRetries:
+ type: integer
+ description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
+ httpRetryPauseMs:
+ type: integer
+ description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
+ includeAcceptHeader:
+ type: boolean
+ description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
+ maxTokenExpirySeconds:
+ type: integer
+ description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
+ passwordSecret:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the password.
+ password:
+ type: string
+ description: The name of the key in the Secret under which the password is stored.
+ required:
+ - secretName
+ - password
+ description: Reference to the `Secret` which holds the password.
+ readTimeoutSeconds:
+ type: integer
+ description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
+ refreshToken:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
+ saslExtensions:
+ additionalProperties:
+ type: string
+ type: object
+ description: SASL extensions parameters.
+ scope:
+ type: string
+ description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
+ tlsTrustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection to the OAuth server.
+ tokenEndpointUri:
+ type: string
+ description: Authorization server token endpoint URI.
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-256
+ - scram-sha-512
+ - plain
+ - oauth
+ description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
+ username:
+ type: string
+ description: Username used for the authentication.
required:
- - outputDefinitions
- - kafkaQueries
- description: "As of Strimzi 0.35.0, JMXTrans is not supported anymore and this option is ignored."
- kafkaExporter:
+ - type
+ description: Authentication configuration for connecting to the cluster.
+ http:
type: object
properties:
- image:
- type: string
- description: "The container image used for the Kafka Exporter pods. If no image name is explicitly specified, the image name corresponds to the version specified in the Cluster Operator configuration. If an image name is not defined in the Cluster Operator configuration, a default value is used."
- groupRegex:
+ port:
+ type: integer
+ minimum: 1023
+ description: The port on which the server is listening.
+ cors:
+ type: object
+ properties:
+ allowedOrigins:
+ type: array
+ items:
+ type: string
+ description: List of allowed origins. Java regular expressions can be used.
+ allowedMethods:
+ type: array
+ items:
+ type: string
+ description: List of allowed HTTP methods.
+ required:
+ - allowedOrigins
+ - allowedMethods
+ description: CORS configuration for the HTTP Bridge.
+ description: The HTTP related configuration.
+ adminClient:
+ type: object
+ properties:
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: The Kafka AdminClient configuration used for AdminClient instances created by the bridge.
+ description: Kafka AdminClient related configuration.
+ consumer:
+ type: object
+ properties:
+ enabled:
+ type: boolean
+ description: Whether the HTTP consumer should be enabled or disabled. The default is enabled (`true`).
+ timeoutSeconds:
+ type: integer
+ description: "The timeout in seconds for deleting inactive consumers, default is -1 (disabled)."
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Kafka consumer configuration used for consumer instances created by the bridge. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, group.id, sasl., security. (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
+ description: Kafka consumer related configuration.
+ producer:
+ type: object
+ properties:
+ enabled:
+ type: boolean
+ description: Whether the HTTP producer should be enabled or disabled. The default is enabled (`true`).
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Kafka producer configuration used for producer instances created by the bridge. Properties with the following prefixes cannot be set: ssl., bootstrap.servers, sasl., security. (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
+ description: Kafka producer related configuration.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve.
+ jvmOptions:
+ type: object
+ properties:
+ "-XX":
+ additionalProperties:
+ type: string
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
type: string
- description: Regular expression to specify which consumer groups to collect. Default value is `.*`.
- topicRegex:
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xmx option to the JVM.
+ "-Xms":
type: string
- description: Regular expression to specify which topics to collect. Default value is `.*`.
- groupExcludeRegex:
+ pattern: "^[0-9]+[mMgG]?$"
+ description: -Xms option to the JVM.
+ gcLoggingEnabled:
+ type: boolean
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The system property name.
+ value:
+ type: string
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: '**Currently not supported** JVM Options for pods.'
+ logging:
+ type: object
+ properties:
+ loggers:
+ additionalProperties:
+ type: string
+ type: object
+ description: A Map from logger name to logger level.
+ type:
type: string
- description: Regular expression to specify which consumer groups to exclude.
- topicExcludeRegex:
+ enum:
+ - inline
+ - external
+ description: "Logging type, must be either 'inline' or 'external'."
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: '`ConfigMap` entry where the logging configuration is stored. '
+ required:
+ - type
+ description: Logging configuration for Kafka Bridge.
+ clientRackInitImage:
+ type: string
+ description: The image of the init container used for initializing the `client.rack`.
+ rack:
+ type: object
+ properties:
+ topologyKey:
type: string
- description: Regular expression to specify which topics to exclude.
- resources:
+ example: topology.kubernetes.io/zone
+ description: "A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set a broker's `broker.rack` config, and the `client.rack` config for Kafka Connect or MirrorMaker 2."
+ required:
+ - topologyKey
+ description: Configuration of the node label which will be used as the client.rack consumer configuration.
+ enableMetrics:
+ type: boolean
+ description: Enable the metrics for the Kafka Bridge. Default is false.
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+ description: The initial delay before the health is first checked. Default to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+ description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+ description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ template:
+ type: object
+ properties:
+ deployment:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ deploymentStrategy:
+ type: string
+ enum:
+ - RollingUpdate
+ - Recreate
+ description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
+ description: Template for Kafka Bridge `Deployment`.
+ pod:
type: object
properties:
- claims:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ imagePullSecrets:
type: array
items:
type: object
properties:
name:
type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- description: CPU and memory resources to reserve.
- logging:
- type: string
- description: "Only log messages with the given severity or above. Valid levels: [`info`, `debug`, `trace`]. Default log level is `info`."
- livenessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod liveness check.
- readinessProbe:
- type: object
- properties:
- initialDelaySeconds:
- type: integer
- minimum: 0
- description: The initial delay before first the health is first checked. Default to 15 seconds. Minimum value is 0.
- timeoutSeconds:
- type: integer
- minimum: 1
- description: The timeout for each attempted health check. Default to 5 seconds. Minimum value is 1.
- periodSeconds:
- type: integer
- minimum: 1
- description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
- successThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
- failureThreshold:
- type: integer
- minimum: 1
- description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
- description: Pod readiness check.
- enableSaramaLogging:
- type: boolean
- description: "Enable Sarama logging, a Go client library used by the Kafka Exporter."
- showAllOffsets:
- type: boolean
- description: "Whether show the offset/lag for all consumer group, otherwise, only show connected consumer groups."
- template:
- type: object
- properties:
- deployment:
+ description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
+ securityContext:
type: object
properties:
- metadata:
+ appArmorProfile:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- deploymentStrategy:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
type: string
- enum:
- - RollingUpdate
- - Recreate
- description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
- description: Template for Kafka Exporter `Deployment`.
- pod:
- type: object
- properties:
- metadata:
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- imagePullSecrets:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
type: array
items:
type: object
properties:
name:
type: string
- description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
- securityContext:
+ value:
+ type: string
+ windowsOptions:
type: object
properties:
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- fsGroup:
- type: integer
- fsGroupChangePolicy:
+ gmsaCredentialSpec:
type: string
- runAsGroup:
- type: integer
- runAsNonRoot:
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- supplementalGroups:
- type: array
- items:
- type: integer
- sysctls:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- value:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Configures pod-level security attributes and common container settings.
- terminationGracePeriodSeconds:
- type: integer
- minimum: 0
- description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
- affinity:
+ runAsUserName:
+ type: string
+ description: Configures pod-level security attributes and common container settings.
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
type: object
properties:
- nodeAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
type: object
properties:
- preference:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: object
- properties:
- nodeSelectorTerms:
- type: array
- items:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchFields:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- podAffinity:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
type: object
properties:
- preferredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
type: array
items:
type: object
properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- mismatchLabelKeys:
- type: array
- items:
+ operator:
type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
type: object
properties:
labelSelector:
@@ -13292,76 +13034,76 @@ spec:
type: string
topologyKey:
type: string
- podAntiAffinity:
- type: object
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
type: object
properties:
- podAffinityTerm:
- type: object
- properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
- type: array
- items:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- mismatchLabelKeys:
- type: array
- items:
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
type: string
- namespaceSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- namespaces:
- type: array
- items:
+ operator:
type: string
- topologyKey:
- type: string
- weight:
- type: integer
- requiredDuringSchedulingIgnoredDuringExecution:
- type: array
- items:
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
type: object
properties:
labelSelector:
@@ -13418,218 +13160,540 @@ spec:
type: string
topologyKey:
type: string
- description: The pod's affinity rules.
- tolerations:
- type: array
- items:
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ description: The pod's tolerations.
+ topologySpreadConstraints:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
type: object
properties:
- effect:
- type: string
- key:
- type: string
- operator:
- type: string
- tolerationSeconds:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
+ type: string
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
type: integer
- value:
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
type: string
- description: The pod's tolerations.
- topologySpreadConstraints:
- type: array
- items:
+ description: Secret to use to populate the volume.
+ configMap:
type: object
properties:
- labelSelector:
- type: object
- properties:
- matchExpressions:
- type: array
- items:
- type: object
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- type: array
- items:
- type: string
- matchLabels:
- additionalProperties:
- type: string
- type: object
- matchLabelKeys:
+ defaultMode:
+ type: integer
+ items:
type: array
items:
- type: string
- maxSkew:
- type: integer
- minDomains:
- type: integer
- nodeAffinityPolicy:
- type: string
- nodeTaintsPolicy:
- type: string
- topologyKey:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
type: string
- whenUnsatisfiable:
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
- type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
type: object
properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
+ claimName:
type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Kafka Bridge `Pods`.
+ apiService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
+ type: string
+ enum:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for Kafka Bridge API `Service`.
+ podDisruptionBudget:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
+ description: Template for Kafka Bridge `PodDisruptionBudget`.
+ bridgeContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Kafka Exporter `Pods`.
- service:
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka Bridge container.
+ clusterRoleBinding:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka Bridge ClusterRoleBinding.
+ serviceAccount:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka Bridge service account.
+ initContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
type: object
properties:
- metadata:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka Exporter `Service`.
- container:
- type: object
- properties:
- env:
- type: array
- items:
- type: object
- properties:
- name:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
type: string
- description: The environment variable key.
- value:
+ drop:
+ type: array
+ items:
type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
type: object
properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
- type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
+ level:
type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
- type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
- type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
- type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Kafka Exporter container.
- serviceAccount:
- type: object
- properties:
- metadata:
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
type: object
properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for the Kafka Exporter service account.
- description: Customization of deployment templates and pods.
- description: "Configuration of the Kafka Exporter. Kafka Exporter can provide additional metrics, for example lag of consumer group at topic/partition."
- maintenanceTimeWindows:
- type: array
- items:
- type: string
- description: "A list of time windows for maintenance tasks (that is, certificates renewal). Each time window is defined by a cron expression."
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka Bridge init container.
+ description: Template for Kafka Bridge resources. The template allows users to specify how a `Deployment` and `Pod` is generated.
+ tracing:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - jaeger
+ - opentelemetry
+ description: "Type of the tracing used. Currently the only supported type is `opentelemetry` for OpenTelemetry tracing. As of Strimzi 0.37.0, `jaeger` type is not supported anymore and this option is ignored."
+ required:
+ - type
+ description: The configuration of tracing in Kafka Bridge.
required:
- - kafka
- description: "The specification of the Kafka and ZooKeeper clusters, and Topic Operator."
+ - bootstrapServers
+ description: The specification of the Kafka Bridge.
status:
type: object
properties:
@@ -13650,137 +13714,165 @@ spec:
reason:
type: string
description: The reason for the condition's last transition (a single word in CamelCase).
- message:
- type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- listeners:
- type: array
- items:
- type: object
- properties:
- type:
- type: string
- description: The name of the listener.
- name:
- type: string
- description: The name of the listener.
- addresses:
- type: array
- items:
- type: object
- properties:
- host:
- type: string
- description: The DNS name or IP address of the Kafka bootstrap service.
- port:
- type: integer
- description: The port of the Kafka bootstrap service.
- description: A list of the addresses for this listener.
- bootstrapServers:
- type: string
- description: A comma-separated list of `host:port` pairs for connecting to the Kafka cluster using this listener.
- certificates:
- type: array
- items:
- type: string
- description: A list of TLS certificates which can be used to verify the identity of the server when connecting to the given listener. Set only for `tls` and `external` listeners.
- description: Addresses of the internal and external listeners.
- kafkaNodePools:
- type: array
- items:
- type: object
- properties:
- name:
+ message:
type: string
- description: The name of the KafkaNodePool used by this Kafka resource.
- description: List of the KafkaNodePools used by this Kafka cluster.
- clusterId:
- type: string
- description: Kafka cluster Id.
- operatorLastSuccessfulVersion:
- type: string
- description: The version of the Strimzi Cluster Operator which performed the last successful reconciliation.
- kafkaVersion:
- type: string
- description: The version of Kafka currently deployed in the cluster.
- kafkaMetadataVersion:
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ url:
type: string
- description: The KRaft metadata.version currently used by the Kafka cluster.
- kafkaMetadataState:
+ description: The URL at which external client applications can access the Kafka Bridge.
+ replicas:
+ type: integer
+ description: The current number of pods being used to provide this resource.
+ labelSelector:
type: string
- enum:
- - ZooKeeper
- - KRaftMigration
- - KRaftDualWriting
- - KRaftPostMigration
- - PreKRaft
- - KRaft
- description: "Defines where cluster metadata are stored. Possible values are: ZooKeeper if the metadata are stored in ZooKeeper; KRaftMigration if the controllers are connected to ZooKeeper, brokers are being rolled with Zookeeper migration enabled and connection information to controllers, and the metadata migration process is running; KRaftDualWriting if the metadata migration process finished and the cluster is in dual-write mode; KRaftPostMigration if the brokers are fully KRaft-based but controllers being rolled to disconnect from ZooKeeper; PreKRaft if brokers and controller are fully KRaft-based, metadata are stored in KRaft, but ZooKeeper must be deleted; KRaft if the metadata are stored in KRaft."
- description: "The status of the Kafka and ZooKeeper clusters, and Topic Operator."
+ description: Label selector for pods providing this resource.
+ description: The status of the Kafka Bridge.
---
-apiVersion: v1
-kind: ServiceAccount
+apiVersion: apps/v1
+kind: Deployment
metadata:
name: strimzi-cluster-operator
labels:
app: strimzi
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: strimzi-cluster-operator-entity-operator-delegation
- labels:
- app: strimzi
-# The Entity Operator cluster role must be bound to the cluster operator service account so that it can delegate the cluster role to the Entity Operator.
-# This must be done to avoid escalating privileges which would be blocked by Kubernetes.
-subjects:
- - kind: ServiceAccount
- name: strimzi-cluster-operator
- namespace: myproject
-roleRef:
- kind: ClusterRole
- name: strimzi-entity-operator
- apiGroup: rbac.authorization.k8s.io
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: strimzi-cluster-operator-watched
- labels:
- app: strimzi
-subjects:
- - kind: ServiceAccount
- name: strimzi-cluster-operator
- namespace: myproject
-roleRef:
- kind: ClusterRole
- name: strimzi-cluster-operator-watched
- apiGroup: rbac.authorization.k8s.io
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ name: strimzi-cluster-operator
+ strimzi.io/kind: cluster-operator
+ template:
+ metadata:
+ labels:
+ name: strimzi-cluster-operator
+ strimzi.io/kind: cluster-operator
+ spec:
+ serviceAccountName: strimzi-cluster-operator
+ volumes:
+ - name: strimzi-tmp
+ emptyDir:
+ medium: Memory
+ sizeLimit: 1Mi
+ - name: co-config-volume
+ configMap:
+ name: strimzi-cluster-operator
+ containers:
+ - name: strimzi-cluster-operator
+ image: quay.io/strimzi/operator:0.43.0
+ ports:
+ - containerPort: 8080
+ name: http
+ args:
+ - /opt/strimzi/bin/cluster_operator_run.sh
+ volumeMounts:
+ - name: strimzi-tmp
+ mountPath: /tmp
+ - name: co-config-volume
+ mountPath: /opt/strimzi/custom-config/
+ env:
+ - name: STRIMZI_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: STRIMZI_FULL_RECONCILIATION_INTERVAL_MS
+ value: "120000"
+ - name: STRIMZI_OPERATION_TIMEOUT_MS
+ value: "300000"
+ - name: STRIMZI_DEFAULT_KAFKA_EXPORTER_IMAGE
+ value: quay.io/strimzi/kafka:0.43.0-kafka-3.8.0
+ - name: STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE
+ value: quay.io/strimzi/kafka:0.43.0-kafka-3.8.0
+ - name: STRIMZI_KAFKA_IMAGES
+ value: |
+ 3.7.0=quay.io/strimzi/kafka:0.43.0-kafka-3.7.0
+ 3.7.1=quay.io/strimzi/kafka:0.43.0-kafka-3.7.1
+ 3.8.0=quay.io/strimzi/kafka:0.43.0-kafka-3.8.0
+ - name: STRIMZI_KAFKA_CONNECT_IMAGES
+ value: |
+ 3.7.0=quay.io/strimzi/kafka:0.43.0-kafka-3.7.0
+ 3.7.1=quay.io/strimzi/kafka:0.43.0-kafka-3.7.1
+ 3.8.0=quay.io/strimzi/kafka:0.43.0-kafka-3.8.0
+ - name: STRIMZI_KAFKA_MIRROR_MAKER_IMAGES
+ value: |
+ 3.7.0=quay.io/strimzi/kafka:0.43.0-kafka-3.7.0
+ 3.7.1=quay.io/strimzi/kafka:0.43.0-kafka-3.7.1
+ 3.8.0=quay.io/strimzi/kafka:0.43.0-kafka-3.8.0
+ - name: STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES
+ value: |
+ 3.7.0=quay.io/strimzi/kafka:0.43.0-kafka-3.7.0
+ 3.7.1=quay.io/strimzi/kafka:0.43.0-kafka-3.7.1
+ 3.8.0=quay.io/strimzi/kafka:0.43.0-kafka-3.8.0
+ - name: STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE
+ value: quay.io/strimzi/operator:0.43.0
+ - name: STRIMZI_DEFAULT_USER_OPERATOR_IMAGE
+ value: quay.io/strimzi/operator:0.43.0
+ - name: STRIMZI_DEFAULT_KAFKA_INIT_IMAGE
+ value: quay.io/strimzi/operator:0.43.0
+ - name: STRIMZI_DEFAULT_KAFKA_BRIDGE_IMAGE
+ value: quay.io/strimzi/kafka-bridge:0.30.0
+ - name: STRIMZI_DEFAULT_KANIKO_EXECUTOR_IMAGE
+ value: quay.io/strimzi/kaniko-executor:0.43.0
+ - name: STRIMZI_DEFAULT_MAVEN_BUILDER
+ value: quay.io/strimzi/maven-builder:0.43.0
+ - name: STRIMZI_OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: STRIMZI_FEATURE_GATES
+ value: ""
+ - name: STRIMZI_LEADER_ELECTION_ENABLED
+ value: "true"
+ - name: STRIMZI_LEADER_ELECTION_LEASE_NAME
+ value: "strimzi-cluster-operator"
+ - name: STRIMZI_LEADER_ELECTION_LEASE_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: STRIMZI_LEADER_ELECTION_IDENTITY
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ livenessProbe:
+ httpGet:
+ path: /healthy
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: http
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 384Mi
+ requests:
+ cpu: 200m
+ memory: 384Mi
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
- name: kafkarebalances.kafka.strimzi.io
+ name: kafkamirrormaker2s.kafka.strimzi.io
labels:
app: strimzi
strimzi.io/crd-install: "true"
spec:
group: kafka.strimzi.io
names:
- kind: KafkaRebalance
- listKind: KafkaRebalanceList
- singular: kafkarebalance
- plural: kafkarebalances
+ kind: KafkaMirrorMaker2
+ listKind: KafkaMirrorMaker2List
+ singular: kafkamirrormaker2
+ plural: kafkamirrormaker2s
shortNames:
- - kr
+ - kmm2
categories:
- strimzi
scope: Namespaced
@@ -13792,374 +13884,1546 @@ spec:
storage: true
subresources:
status: {}
+ scale:
+ specReplicasPath: .spec.replicas
+ statusReplicasPath: .status.replicas
+ labelSelectorPath: .status.labelSelector
additionalPrinterColumns:
- - name: Cluster
- description: The name of the Kafka cluster this resource rebalances
- jsonPath: .metadata.labels.strimzi\.io/cluster
- type: string
- - name: PendingProposal
- description: A proposal has been requested from Cruise Control
- jsonPath: ".status.conditions[?(@.type==\"PendingProposal\")].status"
- type: string
- - name: ProposalReady
- description: A proposal is ready and waiting for approval
- jsonPath: ".status.conditions[?(@.type==\"ProposalReady\")].status"
- type: string
- - name: Rebalancing
- description: Cruise Control is doing the rebalance
- jsonPath: ".status.conditions[?(@.type==\"Rebalancing\")].status"
- type: string
+ - name: Desired replicas
+ description: The desired number of Kafka MirrorMaker 2 replicas
+ jsonPath: .spec.replicas
+ type: integer
- name: Ready
- description: The rebalance is complete
- jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
- type: string
- - name: NotReady
- description: There is an error on the custom resource
- jsonPath: ".status.conditions[?(@.type==\"NotReady\")].status"
- type: string
- - name: Stopped
- description: Processing the proposal or running rebalancing was stopped
- jsonPath: ".status.conditions[?(@.type==\"Stopped\")].status"
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- mode:
- type: string
- enum:
- - full
- - add-brokers
- - remove-brokers
- description: "Mode to run the rebalancing. The supported modes are `full`, `add-brokers`, `remove-brokers`.\nIf not specified, the `full` mode is used by default. \n\n* `full` mode runs the rebalancing across all the brokers in the cluster.\n* `add-brokers` mode can be used after scaling up the cluster to move some replicas to the newly added brokers.\n* `remove-brokers` mode can be used before scaling down the cluster to move replicas out of the brokers to be removed.\n"
- brokers:
- type: array
- items:
- type: integer
- description: The list of newly added brokers in case of scaling up or the ones to be removed in case of scaling down to use for rebalancing. This list can be used only with rebalancing mode `add-brokers` and `removed-brokers`. It is ignored with `full` mode.
- goals:
- type: array
- items:
- type: string
- description: "A list of goals, ordered by decreasing priority, to use for generating and executing the rebalance proposal. The supported goals are available at https://github.com/linkedin/cruise-control#goals. If an empty goals list is provided, the goals declared in the default.goals Cruise Control configuration parameter are used."
- skipHardGoalCheck:
- type: boolean
- description: Whether to allow the hard goals specified in the Kafka CR to be skipped in optimization proposal generation. This can be useful when some of those hard goals are preventing a balance solution being found. Default is false.
- rebalanceDisk:
- type: boolean
- description: "Enables intra-broker disk balancing, which balances disk space utilization between disks on the same broker. Only applies to Kafka deployments that use JBOD storage with multiple disks. When enabled, inter-broker balancing is disabled. Default is false."
- excludedTopics:
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ version:
type: string
- description: A regular expression where any matching topics will be excluded from the calculation of optimization proposals. This expression will be parsed by the java.util.regex.Pattern class; for more information on the supported format consult the documentation for that class.
- concurrentPartitionMovementsPerBroker:
- type: integer
- minimum: 0
- description: The upper bound of ongoing partition replica movements going into/out of each broker. Default is 5.
- concurrentIntraBrokerPartitionMovements:
- type: integer
- minimum: 0
- description: The upper bound of ongoing partition replica movements between disks within each broker. Default is 2.
- concurrentLeaderMovements:
- type: integer
- minimum: 0
- description: The upper bound of ongoing partition leadership movements. Default is 1000.
- replicationThrottle:
+ description: The Kafka Connect version. Defaults to the latest version. Consult the user documentation to understand the process required to upgrade or downgrade the version.
+ replicas:
type: integer
- minimum: 0
- description: "The upper bound, in bytes per second, on the bandwidth used to move replicas. There is no limit by default."
- replicaMovementStrategies:
+ description: The number of pods in the Kafka Connect group. Defaults to `3`.
+ image:
+ type: string
+ description: "The container image used for Kafka Connect pods. If no image name is explicitly specified, it is determined based on the `spec.version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
+ connectCluster:
+ type: string
+ description: The cluster alias used for Kafka Connect. The value must match the alias of the *target* Kafka cluster as specified in the `spec.clusters` configuration. The target Kafka cluster is used by the underlying Kafka Connect framework for its internal topics.
+ clusters:
type: array
items:
- type: string
- description: "A list of strategy class names used to determine the execution order for the replica movements in the generated optimization proposal. By default BaseReplicaMovementStrategy is used, which will execute the replica movements in the order that they were generated."
- description: The specification of the Kafka rebalance.
- status:
- type: object
- properties:
- conditions:
+ type: object
+ properties:
+ alias:
+ type: string
+ pattern: "^[a-zA-Z0-9\\._\\-]{1,100}$"
+ description: Alias used to reference the Kafka cluster.
+ bootstrapServers:
+ type: string
+ description: A comma-separated list of `host:port` pairs for establishing the connection to the Kafka cluster.
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection.
+ description: TLS configuration for connecting MirrorMaker 2 connectors to a cluster.
+ authentication:
+ type: object
+ properties:
+ accessToken:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
+ accessTokenIsJwt:
+ type: boolean
+ description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
+ accessTokenLocation:
+ type: string
+ description: Path to the token file containing an access token to be used for authentication.
+ audience:
+ type: string
+ description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
+ certificateAndKey:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the Secret.
+ key:
+ type: string
+ description: The name of the private key in the Secret.
+ required:
+ - secretName
+ - certificate
+ - key
+ description: Reference to the `Secret` which holds the certificate and private key pair.
+ clientAssertion:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes secret containing the client assertion which was manually configured for the client.
+ clientAssertionLocation:
+ type: string
+ description: Path to the file containing the client assertion to be used for authentication.
+ clientAssertionType:
+ type: string
+ description: "The client assertion type. If not set, and either `clientAssertion` or `clientAssertionLocation` is configured, this value defaults to `urn:ietf:params:oauth:client-assertion-type:jwt-bearer`."
+ clientId:
+ type: string
+ description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ clientSecret:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ connectTimeoutSeconds:
+ type: integer
+ description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
+ disableTlsHostnameVerification:
+ type: boolean
+ description: Enable or disable TLS hostname verification. Default value is `false`.
+ enableMetrics:
+ type: boolean
+ description: Enable or disable OAuth metrics. Default value is `false`.
+ httpRetries:
+ type: integer
+ description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
+ httpRetryPauseMs:
+ type: integer
+ description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
+ includeAcceptHeader:
+ type: boolean
+ description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
+ maxTokenExpirySeconds:
+ type: integer
+ description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
+ passwordSecret:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the password.
+ password:
+ type: string
+ description: The name of the key in the Secret under which the password is stored.
+ required:
+ - secretName
+ - password
+ description: Reference to the `Secret` which holds the password.
+ readTimeoutSeconds:
+ type: integer
+ description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
+ refreshToken:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
+ saslExtensions:
+ additionalProperties:
+ type: string
+ type: object
+ description: SASL extensions parameters.
+ scope:
+ type: string
+ description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
+ tlsTrustedCertificates:
+ type: array
+ items:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the secret.
+ pattern:
+ type: string
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection to the OAuth server.
+ tokenEndpointUri:
+ type: string
+ description: Authorization server token endpoint URI.
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-256
+ - scram-sha-512
+ - plain
+ - oauth
+ description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
+ username:
+ type: string
+ description: Username used for the authentication.
+ required:
+ - type
+ description: Authentication configuration for connecting to the cluster.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The MirrorMaker 2 cluster config. Properties with the following prefixes cannot be set: ssl., sasl., security., listeners, plugin.path, rest., bootstrap.servers, consumer.interceptor.classes, producer.interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
+ required:
+ - alias
+ - bootstrapServers
+ description: Kafka clusters for mirroring.
+ mirrors:
type: array
items:
type: object
properties:
- type:
+ sourceCluster:
type: string
- description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
- status:
+ description: The alias of the source cluster used by the Kafka MirrorMaker 2 connectors. The alias must match a cluster in the list at `spec.clusters`.
+ targetCluster:
type: string
- description: "The status of the condition, either True, False or Unknown."
- lastTransitionTime:
+ description: The alias of the target cluster used by the Kafka MirrorMaker 2 connectors. The alias must match a cluster in the list at `spec.clusters`.
+ sourceConnector:
+ type: object
+ properties:
+ tasksMax:
+ type: integer
+ minimum: 1
+ description: The maximum number of tasks for the Kafka Connector.
+ pause:
+ type: boolean
+ description: Whether the connector should be paused. Defaults to false.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
+ state:
+ type: string
+ enum:
+ - paused
+ - stopped
+ - running
+ description: The state the connector should be in. Defaults to running.
+ autoRestart:
+ type: object
+ properties:
+ enabled:
+ type: boolean
+ description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
+ maxRestarts:
+ type: integer
+ description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
+ description: Automatic restart of connector and tasks configuration.
+ description: The specification of the Kafka MirrorMaker 2 source connector.
+ heartbeatConnector:
+ type: object
+ properties:
+ tasksMax:
+ type: integer
+ minimum: 1
+ description: The maximum number of tasks for the Kafka Connector.
+ pause:
+ type: boolean
+ description: Whether the connector should be paused. Defaults to false.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
+ state:
+ type: string
+ enum:
+ - paused
+ - stopped
+ - running
+ description: The state the connector should be in. Defaults to running.
+ autoRestart:
+ type: object
+ properties:
+ enabled:
+ type: boolean
+ description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
+ maxRestarts:
+ type: integer
+ description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
+ description: Automatic restart of connector and tasks configuration.
+ description: The specification of the Kafka MirrorMaker 2 heartbeat connector.
+ checkpointConnector:
+ type: object
+ properties:
+ tasksMax:
+ type: integer
+ minimum: 1
+ description: The maximum number of tasks for the Kafka Connector.
+ pause:
+ type: boolean
+ description: Whether the connector should be paused. Defaults to false.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
+ state:
+ type: string
+ enum:
+ - paused
+ - stopped
+ - running
+ description: The state the connector should be in. Defaults to running.
+ autoRestart:
+ type: object
+ properties:
+ enabled:
+ type: boolean
+ description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
+ maxRestarts:
+ type: integer
+ description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
+ description: Automatic restart of connector and tasks configuration.
+ description: The specification of the Kafka MirrorMaker 2 checkpoint connector.
+ topicsPattern:
type: string
- description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
- reason:
+ description: "A regular expression matching the topics to be mirrored, for example, \"topic1\\|topic2\\|topic3\". Comma-separated lists are also supported."
+ topicsBlacklistPattern:
type: string
- description: The reason for the condition's last transition (a single word in CamelCase).
- message:
+ description: A regular expression matching the topics to exclude from mirroring. Comma-separated lists are also supported.
+ topicsExcludePattern:
type: string
- description: Human-readable message indicating details about the condition's last transition.
- description: List of status conditions.
- observedGeneration:
- type: integer
- description: The generation of the CRD that was last reconciled by the operator.
- sessionId:
- type: string
- description: The session identifier for requests to Cruise Control pertaining to this KafkaRebalance resource. This is used by the Kafka Rebalance operator to track the status of ongoing rebalancing operations.
- optimizationResult:
- x-kubernetes-preserve-unknown-fields: true
+ description: A regular expression matching the topics to exclude from mirroring. Comma-separated lists are also supported.
+ groupsPattern:
+ type: string
+ description: A regular expression matching the consumer groups to be mirrored. Comma-separated lists are also supported.
+ groupsBlacklistPattern:
+ type: string
+ description: A regular expression matching the consumer groups to exclude from mirroring. Comma-separated lists are also supported.
+ groupsExcludePattern:
+ type: string
+ description: A regular expression matching the consumer groups to exclude from mirroring. Comma-separated lists are also supported.
+ required:
+ - sourceCluster
+ - targetCluster
+ description: Configuration of the MirrorMaker 2 connectors.
+ resources:
type: object
- description: A JSON object describing the optimization result.
- description: The status of the Kafka rebalance.
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
- name: kafkanodepools.kafka.strimzi.io
- labels:
- app: strimzi
- strimzi.io/crd-install: "true"
-spec:
- group: kafka.strimzi.io
- names:
- kind: KafkaNodePool
- listKind: KafkaNodePoolList
- singular: kafkanodepool
- plural: kafkanodepools
- shortNames:
- - knp
- categories:
- - strimzi
- scope: Namespaced
- conversion:
- strategy: None
- versions:
- - name: v1beta2
- served: true
- storage: true
- subresources:
- status: {}
- scale:
- specReplicasPath: .spec.replicas
- statusReplicasPath: .status.replicas
- labelSelectorPath: .status.labelSelector
- additionalPrinterColumns:
- - name: Desired replicas
- description: The desired number of replicas
- jsonPath: .spec.replicas
- type: integer
- - name: Roles
- description: Roles of the nodes in the pool
- jsonPath: .status.roles
- type: string
- - name: NodeIds
- description: Node IDs used by Kafka nodes in this pool
- jsonPath: .status.nodeIds
- type: string
- schema:
- openAPIV3Schema:
- type: object
- properties:
- apiVersion:
- type: string
- description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
- kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- replicas:
- type: integer
- minimum: 0
- description: The number of pods in the pool.
- storage:
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: The maximum limits for CPU and memory resources and the requested initial resources.
+ livenessProbe:
+ type: object
+ properties:
+ initialDelaySeconds:
+ type: integer
+ minimum: 0
+                    description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+                    description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+                    description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod liveness checking.
+ readinessProbe:
type: object
properties:
- class:
- type: string
- description: The storage class to use for dynamic volume allocation.
- deleteClaim:
- type: boolean
- description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
- id:
+ initialDelaySeconds:
type: integer
minimum: 0
- description: Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'.
- kraftMetadata:
+                    description: The initial delay before the health is first checked. Defaults to 15 seconds. Minimum value is 0.
+ timeoutSeconds:
+ type: integer
+ minimum: 1
+                    description: The timeout for each attempted health check. Defaults to 5 seconds. Minimum value is 1.
+ periodSeconds:
+ type: integer
+ minimum: 1
+                    description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
+ successThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+ failureThreshold:
+ type: integer
+ minimum: 1
+ description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+ description: Pod readiness checking.
+ jvmOptions:
+ type: object
+ properties:
+ "-XX":
+ additionalProperties:
+ type: string
+ type: object
+ description: A map of -XX options to the JVM.
+ "-Xmx":
type: string
- enum:
- - shared
- description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
- overrides:
+ pattern: "^[0-9]+[mMgG]?$"
+                  description: -Xmx option to the JVM.
+ "-Xms":
+ type: string
+ pattern: "^[0-9]+[mMgG]?$"
+                    description: -Xms option to the JVM.
+ gcLoggingEnabled:
+ type: boolean
+ description: Specifies whether the Garbage Collection logging is enabled. The default is false.
+ javaSystemProperties:
type: array
items:
type: object
properties:
- class:
+ name:
type: string
- description: The storage class to use for dynamic volume allocation for this broker.
- broker:
- type: integer
- description: Id of the kafka broker (broker identifier).
- description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers.
- selector:
+ description: The system property name.
+ value:
+ type: string
+ description: The system property value.
+ description: A map of additional system properties which will be passed using the `-D` option to the JVM.
+ description: JVM Options for pods.
+ jmxOptions:
+ type: object
+ properties:
+ authentication:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - password
+                        description: Authentication type. Currently the only supported type is `password`. The `password` type creates a username and protected port with no TLS.
+ required:
+ - type
+ description: Authentication configuration for connecting to the JMX port.
+ description: JMX Options.
+ logging:
+ type: object
+ properties:
+ loggers:
additionalProperties:
type: string
type: object
- description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
- size:
- type: string
- description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
- sizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
- type:
- type: string
- enum:
- - ephemeral
- - persistent-claim
- - jbod
- description: "Storage type, must be either 'ephemeral', 'persistent-claim', or 'jbod'."
- volumes:
- type: array
- items:
- type: object
- properties:
- class:
+ description: A Map from logger name to logger level.
+ type:
+ type: string
+ enum:
+ - inline
+ - external
+ description: "Logging type, must be either 'inline' or 'external'."
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: '`ConfigMap` entry where the logging configuration is stored. '
+ required:
+ - type
+ description: Logging configuration for Kafka Connect.
+ clientRackInitImage:
+ type: string
+ description: The image of the init container used for initializing the `client.rack`.
+ rack:
+ type: object
+ properties:
+ topologyKey:
+ type: string
+ example: topology.kubernetes.io/zone
+ description: "A key that matches labels assigned to the Kubernetes cluster nodes. The value of the label is used to set a broker's `broker.rack` config, and the `client.rack` config for Kafka Connect or MirrorMaker 2."
+ required:
+ - topologyKey
+ description: Configuration of the node label which will be used as the `client.rack` consumer configuration.
+ metricsConfig:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - jmxPrometheusExporter
+ description: Metrics type. Only 'jmxPrometheusExporter' supported currently.
+ valueFrom:
+ type: object
+ properties:
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to the key in the ConfigMap containing the configuration.
+ description: 'ConfigMap entry where the Prometheus JMX Exporter configuration is stored. '
+ required:
+ - type
+ - valueFrom
+ description: Metrics configuration.
+ tracing:
+ type: object
+ properties:
+ type:
+ type: string
+ enum:
+ - jaeger
+ - opentelemetry
+ description: "Type of the tracing used. Currently the only supported type is `opentelemetry` for OpenTelemetry tracing. As of Strimzi 0.37.0, `jaeger` type is not supported anymore and this option is ignored."
+ required:
+ - type
+ description: The configuration of tracing in Kafka Connect.
+ template:
+ type: object
+ properties:
+ deployment:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ deploymentStrategy:
+ type: string
+ enum:
+ - RollingUpdate
+ - Recreate
+ description: Pod replacement strategy for deployment configuration changes. Valid values are `RollingUpdate` and `Recreate`. Defaults to `RollingUpdate`.
+ description: Template for Kafka Connect `Deployment`.
+ podSet:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Kafka Connect `StrimziPodSet` resource.
+ pod:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ imagePullSecrets:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored."
+ securityContext:
+ type: object
+ properties:
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ fsGroup:
+ type: integer
+ fsGroupChangePolicy:
+ type: string
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ supplementalGroups:
+ type: array
+ items:
+ type: integer
+ sysctls:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ value:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Configures pod-level security attributes and common container settings.
+ terminationGracePeriodSeconds:
+ type: integer
+ minimum: 0
+ description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds."
+ affinity:
+ type: object
+ properties:
+ nodeAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ preference:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: object
+ properties:
+ nodeSelectorTerms:
+ type: array
+ items:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchFields:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ podAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ podAntiAffinity:
+ type: object
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ podAffinityTerm:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ weight:
+ type: integer
+ requiredDuringSchedulingIgnoredDuringExecution:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ mismatchLabelKeys:
+ type: array
+ items:
+ type: string
+ namespaceSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ namespaces:
+ type: array
+ items:
+ type: string
+ topologyKey:
+ type: string
+ description: The pod's affinity rules.
+ tolerations:
+ type: array
+ items:
+ type: object
+ properties:
+ effect:
+ type: string
+ key:
+ type: string
+ operator:
+ type: string
+ tolerationSeconds:
+ type: integer
+ value:
+ type: string
+ description: The pod's tolerations.
+ topologySpreadConstraints:
+ type: array
+ items:
+ type: object
+ properties:
+ labelSelector:
+ type: object
+ properties:
+ matchExpressions:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ operator:
+ type: string
+ values:
+ type: array
+ items:
+ type: string
+ matchLabels:
+ additionalProperties:
+ type: string
+ type: object
+ matchLabelKeys:
+ type: array
+ items:
+ type: string
+ maxSkew:
+ type: integer
+ minDomains:
+ type: integer
+ nodeAffinityPolicy:
+ type: string
+ nodeTaintsPolicy:
+ type: string
+ topologyKey:
+ type: string
+ whenUnsatisfiable:
+ type: string
+ description: The pod's topology spread constraints.
+ priorityClassName:
+ type: string
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
+ type: string
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Kafka Connect `Pods`.
+ apiService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
type: string
- description: The storage class to use for dynamic volume allocation.
- deleteClaim:
- type: boolean
- description: Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed.
- id:
- type: integer
- minimum: 0
- description: Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration.
- kraftMetadata:
+ enum:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for Kafka Connect API `Service`.
+ headlessService:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ ipFamilyPolicy:
+ type: string
+ enum:
+ - SingleStack
+ - PreferDualStack
+ - RequireDualStack
+ description: "Specifies the IP Family Policy used by the service. Available options are `SingleStack`, `PreferDualStack` and `RequireDualStack`. `SingleStack` is for a single IP family. `PreferDualStack` is for two IP families on dual-stack configured clusters or a single IP family on single-stack clusters. `RequireDualStack` fails unless there are two IP families on dual-stack configured clusters. If unspecified, Kubernetes will choose the default value based on the service type."
+ ipFamilies:
+ type: array
+ items:
type: string
enum:
- - shared
- description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set."
- overrides:
- type: array
- items:
+ - IPv4
+ - IPv6
+ description: "Specifies the IP Families used by the service. Available options are `IPv4` and `IPv6`. If unspecified, Kubernetes will choose the default value based on the `ipFamilyPolicy` setting."
+ description: Template for Kafka Connect headless `Service`.
+ connectContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka Connect container.
+ initContainer:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The environment variable key.
+ value:
+ type: string
+ description: The environment variable value.
+ description: Environment variables which should be applied to the container.
+ securityContext:
+ type: object
+ properties:
+ allowPrivilegeEscalation:
+ type: boolean
+ appArmorProfile:
+ type: object
+ properties:
+ localhostProfile:
+ type: string
+ type:
+ type: string
+ capabilities:
+ type: object
+ properties:
+ add:
+ type: array
+ items:
+ type: string
+ drop:
+ type: array
+ items:
+ type: string
+ privileged:
+ type: boolean
+ procMount:
+ type: string
+ readOnlyRootFilesystem:
+ type: boolean
+ runAsGroup:
+ type: integer
+ runAsNonRoot:
+ type: boolean
+ runAsUser:
+ type: integer
+ seLinuxOptions:
+ type: object
+ properties:
+ level:
+ type: string
+ role:
+ type: string
+ type:
+ type: string
+ user:
+ type: string
+ seccompProfile:
type: object
properties:
- class:
+ localhostProfile:
type: string
- description: The storage class to use for dynamic volume allocation for this broker.
- broker:
- type: integer
- description: Id of the kafka broker (broker identifier).
- description: Overrides for individual brokers. The `overrides` field allows to specify a different configuration for different brokers.
- selector:
- additionalProperties:
- type: string
+ type:
+ type: string
+ windowsOptions:
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ type: string
+ gmsaCredentialSpecName:
+ type: string
+ hostProcess:
+ type: boolean
+ runAsUserName:
+ type: string
+ description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
type: object
- description: Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume.
- size:
- type: string
- description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`."
- sizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)."
- type:
- type: string
- enum:
- - ephemeral
- - persistent-claim
- description: "Storage type, must be either 'ephemeral' or 'persistent-claim'."
- required:
- - type
- description: List of volumes as Storage objects representing the JBOD disks array.
- required:
- - type
- description: Storage configuration (disk). Cannot be updated.
- roles:
- type: array
- items:
- type: string
- enum:
- - controller
- - broker
- description: "The roles that the nodes in this pool will have when KRaft mode is enabled. Supported values are 'broker' and 'controller'. This field is required. When KRaft mode is disabled, the only allowed value if `broker`."
- resources:
- type: object
- properties:
- claims:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- limits:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
- type: object
- requests:
- additionalProperties:
- anyOf:
- - type: integer
- - type: string
- pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
- x-kubernetes-int-or-string: true
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka init container.
+ podDisruptionBudget:
type: object
- description: CPU and memory resources to reserve.
- jvmOptions:
- type: object
- properties:
- "-XX":
- additionalProperties:
- type: string
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata to apply to the `PodDisruptionBudgetTemplate` resource.
+ maxUnavailable:
+ type: integer
+ minimum: 0
+ description: "Maximum number of unavailable pods to allow automatic Pod eviction. A Pod eviction is allowed when the `maxUnavailable` number of pods or fewer are unavailable after the eviction. Setting this value to 0 prevents all voluntary evictions, so the pods must be evicted manually. Defaults to 1."
+ description: Template for Kafka Connect `PodDisruptionBudget`.
+ serviceAccount:
type: object
- description: A map of -XX options to the JVM.
- "-Xmx":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xmx option to to the JVM.
- "-Xms":
- type: string
- pattern: "^[0-9]+[mMgG]?$"
- description: -Xms option to to the JVM.
- gcLoggingEnabled:
- type: boolean
- description: Specifies whether the Garbage Collection logging is enabled. The default is false.
- javaSystemProperties:
- type: array
- items:
- type: object
- properties:
- name:
- type: string
- description: The system property name.
- value:
- type: string
- description: The system property value.
- description: A map of additional system properties which will be passed using the `-D` option to the JVM.
- description: JVM Options for pods.
- template:
- type: object
- properties:
- podSet:
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka Connect service account.
+ clusterRoleBinding:
type: object
properties:
metadata:
@@ -14176,8 +15440,8 @@ spec:
type: object
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
- description: Template for Kafka `StrimziPodSet` resource.
- pod:
+ description: Template for the Kafka Connect ClusterRoleBinding.
+ buildPod:
type: object
properties:
metadata:
@@ -14655,106 +15919,113 @@ spec:
type: string
whenUnsatisfiable:
type: string
- description: The pod's topology spread constraints.
- priorityClassName:
- type: string
- description: 'The name of the priority class used to assign priority to the pods. '
- schedulerName:
- type: string
- description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
- hostAliases:
- type: array
- items:
- type: object
- properties:
- hostnames:
- type: array
- items:
- type: string
- ip:
- type: string
- description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
- enableServiceLinks:
- type: boolean
- description: Indicates whether information about services should be injected into Pod's environment variables.
- tmpDirSizeLimit:
- type: string
- pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
- description: Template for Kafka `Pods`.
- perPodService:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka per-pod `Services` used for access from outside of Kubernetes.
- perPodRoute:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka per-pod `Routes` used for access from outside of OpenShift.
- perPodIngress:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
- type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
- type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes.
- persistentVolumeClaim:
- type: object
- properties:
- metadata:
- type: object
- properties:
- labels:
- additionalProperties:
+ description: The pod's topology spread constraints.
+ priorityClassName:
+ type: string
+ description: 'The name of the priority class used to assign priority to the pods. '
+ schedulerName:
+ type: string
+ description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used."
+ hostAliases:
+ type: array
+ items:
+ type: object
+ properties:
+ hostnames:
+ type: array
+ items:
+ type: string
+ ip:
type: string
- type: object
- description: Labels added to the Kubernetes resource.
- annotations:
- additionalProperties:
+ description: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified.
+ enableServiceLinks:
+ type: boolean
+ description: Indicates whether information about services should be injected into Pod's environment variables.
+ tmpDirSizeLimit:
+ type: string
+ pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
type: string
- type: object
- description: Annotations added to the Kubernetes resource.
- description: Metadata applied to the resource.
- description: Template for all Kafka `PersistentVolumeClaims`.
- kafkaContainer:
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
+ description: Template for Kafka Connect Build `Pods`. The build pod is used only on Kubernetes.
+ buildContainer:
type: object
properties:
env:
@@ -14834,94 +16105,181 @@ spec:
runAsUserName:
type: string
description: Security context for the container.
- description: Template for the Kafka broker container.
- initContainer:
- type: object
- properties:
- env:
+ volumeMounts:
type: array
items:
type: object
properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
name:
type: string
- description: The environment variable key.
- value:
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
type: string
- description: The environment variable value.
- description: Environment variables which should be applied to the container.
- securityContext:
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
+ description: Template for the Kafka Connect Build container. The build container is used only on Kubernetes.
+ buildConfig:
+ type: object
+ properties:
+ metadata:
type: object
properties:
- allowPrivilegeEscalation:
- type: boolean
- appArmorProfile:
+ labels:
+ additionalProperties:
+ type: string
type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- capabilities:
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
type: object
- properties:
- add:
- type: array
- items:
- type: string
- drop:
- type: array
- items:
- type: string
- privileged:
- type: boolean
- procMount:
- type: string
- readOnlyRootFilesystem:
- type: boolean
- runAsGroup:
- type: integer
- runAsNonRoot:
- type: boolean
- runAsUser:
- type: integer
- seLinuxOptions:
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ pullSecret:
+ type: string
+ description: Container Registry Secret with the credentials for pulling the base image.
+ description: Template for the Kafka Connect BuildConfig used to build new container images. The BuildConfig is used only on OpenShift.
+ buildServiceAccount:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
type: object
- properties:
- level:
- type: string
- role:
- type: string
- type:
- type: string
- user:
- type: string
- seccompProfile:
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
type: object
- properties:
- localhostProfile:
- type: string
- type:
- type: string
- windowsOptions:
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for the Kafka Connect Build service account.
+ jmxSecret:
+ type: object
+ properties:
+ metadata:
+ type: object
+ properties:
+ labels:
+ additionalProperties:
+ type: string
type: object
- properties:
- gmsaCredentialSpec:
- type: string
- gmsaCredentialSpecName:
- type: string
- hostProcess:
- type: boolean
- runAsUserName:
- type: string
- description: Security context for the container.
- description: Template for the Kafka init container.
- description: Template for pool resources. The template allows users to specify how the resources belonging to this pool are generated.
+ description: Labels added to the Kubernetes resource.
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ description: Annotations added to the Kubernetes resource.
+ description: Metadata applied to the resource.
+ description: Template for Secret of the Kafka Connect Cluster JMX authentication.
+ description: "Template for Kafka Connect and Kafka MirrorMaker 2 resources. The template allows users to specify how the `Pods`, `Service`, and other services are generated."
+ externalConfiguration:
+ type: object
+ properties:
+ env:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of the environment variable which will be passed to the Kafka Connect pods. The name of the environment variable cannot start with `KAFKA_` or `STRIMZI_`.
+ valueFrom:
+ type: object
+ properties:
+ secretKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to a key in a Secret.
+ configMapKeyRef:
+ type: object
+ properties:
+ key:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to a key in a ConfigMap.
+ description: Value of the environment variable which will be passed to the Kafka Connect pods. It can be passed either as a reference to Secret or ConfigMap field. The field has to specify exactly one Secret or ConfigMap.
+ required:
+ - name
+ - valueFrom
+ description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as environment variables.
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name of the volume which will be added to the Kafka Connect pods.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Reference to a key in a Secret. Exactly one Secret or ConfigMap has to be specified.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: Reference to a key in a ConfigMap. Exactly one Secret or ConfigMap has to be specified.
+ required:
+ - name
+ description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as volumes.
+ description: Pass data from Secrets or ConfigMaps to the Kafka Connect pods and use them to configure connectors.
required:
- - replicas
- - storage
- - roles
- description: The specification of the KafkaNodePool.
+ - connectCluster
+ description: The specification of the Kafka MirrorMaker 2 cluster.
status:
type: object
properties:
@@ -14949,217 +16307,303 @@ spec:
observedGeneration:
type: integer
description: The generation of the CRD that was last reconciled by the operator.
- nodeIds:
+ url:
+ type: string
+ description: The URL of the REST API endpoint for managing and monitoring Kafka Connect connectors.
+ connectors:
type: array
items:
- type: integer
- description: Node IDs used by Kafka nodes in this pool.
- clusterId:
- type: string
- description: Kafka cluster ID.
- roles:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "List of MirrorMaker 2 connector statuses, as reported by the Kafka Connect REST API."
+ autoRestartStatuses:
type: array
items:
- type: string
- enum:
- - controller
- - broker
- description: The roles currently assigned to this pool.
- replicas:
- type: integer
- description: The current number of pods being used to provide this resource.
+ type: object
+ properties:
+ count:
+ type: integer
+ description: The number of times the connector or task is restarted.
+ connectorName:
+ type: string
+ description: The name of the connector being restarted.
+ lastRestartTimestamp:
+ type: string
+ description: The last time the automatic restart was attempted. The required format is 'yyyy-MM-ddTHH:mm:ssZ' in the UTC time zone.
+ description: List of MirrorMaker 2 connector auto restart statuses.
+ connectorPlugins:
+ type: array
+ items:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The class of the connector plugin.
+ type:
+ type: string
+ description: The type of the connector plugin. The available types are `sink` and `source`.
+ version:
+ type: string
+ description: The version of the connector plugin.
+ description: The list of connector plugins available in this Kafka Connect deployment.
labelSelector:
type: string
description: Label selector for pods providing this resource.
- description: The status of the KafkaNodePool.
+ replicas:
+ type: integer
+ description: The current number of pods being used to provide this resource.
+ description: The status of the Kafka MirrorMaker 2 cluster.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: strimzi-cluster-operator-watched
+ name: strimzi-kafka-client
labels:
app: strimzi
rules:
- # Resources in this role are being watched by the operator. When operator is deployed as cluster-wide, these permissions
- # need to be granted to the operator on a cluster wide level as well, even if the operands will be deployed only in
- # few of the namespaces in given cluster. This is required to set up the Kubernetes watches and informers.
- # Note: The rights included in this role might change in the future
- apiGroups:
- ""
resources:
- # The cluster operator needs to access and delete pods, this is to allow it to monitor pod health and coordinate rolling updates
- - pods
- verbs:
- - watch
- - list
- - apiGroups:
- - "kafka.strimzi.io"
- resources:
- # The Cluster Operator operates the Strimzi custom resources
- - kafkas
- - kafkanodepools
- - kafkaconnects
- - kafkaconnectors
- - kafkamirrormakers
- - kafkabridges
- - kafkamirrormaker2s
- - kafkarebalances
- verbs:
- - get
- - list
- - watch
- - create
- - patch
- - update
- - apiGroups:
- - "kafka.strimzi.io"
- resources:
- # The Cluster Operator needs to manage the status of the Strimzi custom resources
- - kafkas/status
- - kafkanodepools/status
- - kafkaconnects/status
- - kafkaconnectors/status
- - kafkamirrormakers/status
- - kafkabridges/status
- - kafkamirrormaker2s/status
- - kafkarebalances/status
- verbs:
- - get
- - patch
- - update
- - apiGroups:
- - "core.strimzi.io"
- resources:
- # The cluster operator uses StrimziPodSets to manage the Kafka and ZooKeeper pods
- - strimzipodsets
- verbs:
- - get
- - list
- - watch
- - create
- - delete
- - patch
- - update
- - apiGroups:
- - "core.strimzi.io"
- resources:
- # The Cluster Operator needs to manage the status of the StrimziPodSet custom resource
- - strimzipodsets/status
+ # The Kafka clients (Connect, Mirror Maker, etc.) require "get" permissions to view the node they are on
+ # This information is used to generate a Rack ID (client.rack option) that is used for consuming from the closest
+ # replicas when enabled
+ - nodes
verbs:
- get
- - patch
- - update
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: kafkaconnectors.kafka.strimzi.io
+ labels:
+ app: strimzi
+ strimzi.io/crd-install: "true"
+spec:
+ group: kafka.strimzi.io
+ names:
+ kind: KafkaConnector
+ listKind: KafkaConnectorList
+ singular: kafkaconnector
+ plural: kafkaconnectors
+ shortNames:
+ - kctr
+ categories:
+ - strimzi
+ scope: Namespaced
+ conversion:
+ strategy: None
+ versions:
+ - name: v1beta2
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ scale:
+ specReplicasPath: .spec.tasksMax
+ statusReplicasPath: .status.tasksMax
+ additionalPrinterColumns:
+ - name: Cluster
+ description: The name of the Kafka Connect cluster this connector belongs to
+ jsonPath: .metadata.labels.strimzi\.io/cluster
+ type: string
+ - name: Connector class
+ description: The class used by this connector
+ jsonPath: .spec.class
+ type: string
+ - name: Max Tasks
+ description: Maximum number of tasks
+ jsonPath: .spec.tasksMax
+ type: integer
+ - name: Ready
+ description: The state of the custom resource
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ type: string
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ apiVersion:
+ type: string
+ description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
+ kind:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ class:
+ type: string
+ description: The Class for the Kafka Connector.
+ tasksMax:
+ type: integer
+ minimum: 1
+ description: The maximum number of tasks for the Kafka Connector.
+ autoRestart:
+ type: object
+ properties:
+ enabled:
+ type: boolean
+ description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
+ maxRestarts:
+ type: integer
+ description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
+ description: Automatic restart of connector and tasks configuration.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
+ pause:
+ type: boolean
+ description: Whether the connector should be paused. Defaults to false.
+ state:
+ type: string
+ enum:
+ - paused
+ - stopped
+ - running
+ description: The state the connector should be in. Defaults to running.
+ description: The specification of the Kafka Connector.
+ status:
+ type: object
+ properties:
+ conditions:
+ type: array
+ items:
+ type: object
+ properties:
+ type:
+ type: string
+ description: "The unique identifier of a condition, used to distinguish between other conditions in the resource."
+ status:
+ type: string
+ description: "The status of the condition, either True, False or Unknown."
+ lastTransitionTime:
+ type: string
+ description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone."
+ reason:
+ type: string
+ description: The reason for the condition's last transition (a single word in CamelCase).
+ message:
+ type: string
+ description: Human-readable message indicating details about the condition's last transition.
+ description: List of status conditions.
+ observedGeneration:
+ type: integer
+ description: The generation of the CRD that was last reconciled by the operator.
+ autoRestart:
+ type: object
+ properties:
+ count:
+ type: integer
+ description: The number of times the connector or task is restarted.
+ connectorName:
+ type: string
+ description: The name of the connector being restarted.
+ lastRestartTimestamp:
+ type: string
+ description: The last time the automatic restart was attempted. The required format is 'yyyy-MM-ddTHH:mm:ssZ' in the UTC time zone.
+ description: The auto restart status.
+ connectorStatus:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The connector status, as reported by the Kafka Connect REST API."
+ tasksMax:
+ type: integer
+ description: The maximum number of tasks for the Kafka Connector.
+ topics:
+ type: array
+ items:
+ type: string
+ description: The list of topics used by the Kafka Connector.
+ description: The status of the Kafka Connector.
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
+kind: RoleBinding
metadata:
- name: strimzi-kafka-client
+ name: strimzi-cluster-operator
labels:
app: strimzi
-rules:
- - apiGroups:
- - ""
- resources:
- # The Kafka clients (Connect, Mirror Maker, etc.) require "get" permissions to view the node they are on
- # This information is used to generate a Rack ID (client.rack option) that is used for consuming from the closest
- # replicas when enabled
- - nodes
- verbs:
- - get
+subjects:
+ - kind: ServiceAccount
+ name: strimzi-cluster-operator
+ namespace: myproject
+roleRef:
+ kind: ClusterRole
+ name: strimzi-cluster-operator-namespaced
+ apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: strimzi-entity-operator
+ name: strimzi-kafka-broker
labels:
app: strimzi
rules:
- - apiGroups:
- - "kafka.strimzi.io"
- resources:
- # The Entity Operator contains the Topic Operator which needs to access and manage KafkaTopic resources
- - kafkatopics
- verbs:
- - get
- - list
- - watch
- - create
- - patch
- - update
- - delete
- - apiGroups:
- - "kafka.strimzi.io"
- resources:
- # The Entity Operator contains the User Operator which needs to access and manage KafkaUser resources
- - kafkausers
- verbs:
- - get
- - list
- - watch
- - create
- - patch
- - update
- - apiGroups:
- - "kafka.strimzi.io"
- resources:
- # The Entity Operator contains the Topic Operator which needs to access and manage KafkaTopic resources
- - kafkatopics/status
- # The Entity Operator contains the User Operator which needs to access and manage KafkaUser resources
- - kafkausers/status
- verbs:
- - get
- - patch
- - update
- apiGroups:
- ""
resources:
- # The entity operator user-operator needs to access and manage secrets to store generated credentials
- - secrets
+ # The Kafka Brokers require "get" permissions to view the node they are on
+ # This information is used to generate a Rack ID that is used for High Availability configurations
+ - nodes
verbs:
- get
- - list
- - watch
- - create
- - delete
- - patch
- - update
---
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
+kind: ConfigMap
+apiVersion: v1
metadata:
- name: strimzi-cluster-operator-leader-election
+ name: strimzi-cluster-operator
labels:
app: strimzi
-subjects:
- - kind: ServiceAccount
- name: strimzi-cluster-operator
- namespace: myproject
-roleRef:
- kind: ClusterRole
- name: strimzi-cluster-operator-leader-election
- apiGroup: rbac.authorization.k8s.io
+data:
+ log4j2.properties: |
+ name = COConfig
+ monitorInterval = 30
+
+ appender.console.type = Console
+ appender.console.name = STDOUT
+ appender.console.layout.type = PatternLayout
+ appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+
+ rootLogger.level = ${env:STRIMZI_LOG_LEVEL:-INFO}
+ rootLogger.appenderRefs = stdout
+ rootLogger.appenderRef.console.ref = STDOUT
+
+ # Kafka AdminClient logging is a bit noisy at INFO level
+ logger.kafka.name = org.apache.kafka
+ logger.kafka.level = WARN
+
+ # Zookeeper is very verbose even on INFO level -> We set it to WARN by default
+ logger.zookeepertrustmanager.name = org.apache.zookeeper
+ logger.zookeepertrustmanager.level = WARN
+
+ # Keeps separate level for Netty logging -> to not be changed by the root logger
+ logger.netty.name = io.netty
+ logger.netty.level = INFO
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
- name: kafkamirrormaker2s.kafka.strimzi.io
+ name: kafkaconnects.kafka.strimzi.io
labels:
app: strimzi
strimzi.io/crd-install: "true"
spec:
group: kafka.strimzi.io
names:
- kind: KafkaMirrorMaker2
- listKind: KafkaMirrorMaker2List
- singular: kafkamirrormaker2
- plural: kafkamirrormaker2s
+ kind: KafkaConnect
+ listKind: KafkaConnectList
+ singular: kafkaconnect
+ plural: kafkaconnects
shortNames:
- - kmm2
+ - kc
categories:
- strimzi
scope: Namespaced
@@ -15177,7 +16621,7 @@ spec:
labelSelectorPath: .status.labelSelector
additionalPrinterColumns:
- name: Desired replicas
- description: The desired number of Kafka MirrorMaker 2 replicas
+ description: The desired number of Kafka Connect replicas
jsonPath: .spec.replicas
type: integer
- name: Ready
@@ -15192,356 +16636,238 @@ spec:
type: string
description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
kind:
- type: string
- description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
- metadata:
- type: object
- spec:
- type: object
- properties:
- version:
- type: string
- description: The Kafka Connect version. Defaults to the latest version. Consult the user documentation to understand the process required to upgrade or downgrade the version.
- replicas:
- type: integer
- description: The number of pods in the Kafka Connect group. Defaults to `3`.
- image:
- type: string
- description: "The container image used for Kafka Connect pods. If no image name is explicitly specified, it is determined based on the `spec.version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
- connectCluster:
- type: string
- description: The cluster alias used for Kafka Connect. The value must match the alias of the *target* Kafka cluster as specified in the `spec.clusters` configuration. The target Kafka cluster is used by the underlying Kafka Connect framework for its internal topics.
- clusters:
- type: array
- items:
- type: object
- properties:
- alias:
- type: string
- pattern: "^[a-zA-Z0-9\\._\\-]{1,100}$"
- description: Alias used to reference the Kafka cluster.
- bootstrapServers:
- type: string
- description: A comma-separated list of `host:port` pairs for establishing the connection to the Kafka cluster.
- tls:
- type: object
- properties:
- trustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection.
- description: TLS configuration for connecting MirrorMaker 2 connectors to a cluster.
- authentication:
+ type: string
+ description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ metadata:
+ type: object
+ spec:
+ type: object
+ properties:
+ version:
+ type: string
+ description: The Kafka Connect version. Defaults to the latest version. Consult the user documentation to understand the process required to upgrade or downgrade the version.
+ replicas:
+ type: integer
+ description: The number of pods in the Kafka Connect group. Defaults to `3`.
+ image:
+ type: string
+ description: "The container image used for Kafka Connect pods. If no image name is explicitly specified, it is determined based on the `spec.version` configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration."
+ bootstrapServers:
+ type: string
+ description: Bootstrap servers to connect to. This should be given as a comma-separated list of `host:port` pairs.
+ tls:
+ type: object
+ properties:
+ trustedCertificates:
+ type: array
+ items:
type: object
properties:
- accessToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
- accessTokenIsJwt:
- type: boolean
- description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
- audience:
- type: string
- description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
- certificateAndKey:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the Secret.
- key:
- type: string
- description: The name of the private key in the Secret.
- required:
- - secretName
- - certificate
- - key
- description: Reference to the `Secret` which holds the certificate and private key pair.
- clientId:
- type: string
- description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- clientSecret:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
- connectTimeoutSeconds:
- type: integer
- description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
- disableTlsHostnameVerification:
- type: boolean
- description: Enable or disable TLS hostname verification. Default value is `false`.
- enableMetrics:
- type: boolean
- description: Enable or disable OAuth metrics. Default value is `false`.
- httpRetries:
- type: integer
- description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
- httpRetryPauseMs:
- type: integer
- description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
- includeAcceptHeader:
- type: boolean
- description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
- maxTokenExpirySeconds:
- type: integer
- description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
- passwordSecret:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the password.
- password:
- type: string
- description: The name of the key in the Secret under which the password is stored.
- required:
- - secretName
- - password
- description: Reference to the `Secret` which holds the password.
- readTimeoutSeconds:
- type: integer
- description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
- refreshToken:
- type: object
- properties:
- key:
- type: string
- description: The key under which the secret value is stored in the Kubernetes Secret.
- secretName:
- type: string
- description: The name of the Kubernetes Secret containing the secret value.
- required:
- - key
- - secretName
- description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
- scope:
- type: string
- description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
- tlsTrustedCertificates:
- type: array
- items:
- type: object
- properties:
- secretName:
- type: string
- description: The name of the Secret containing the certificate.
- certificate:
- type: string
- description: The name of the file certificate in the secret.
- pattern:
- type: string
- description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
- oneOf:
- - properties:
- certificate: {}
- required:
- - certificate
- - properties:
- pattern: {}
- required:
- - pattern
- required:
- - secretName
- description: Trusted certificates for TLS connection to the OAuth server.
- tokenEndpointUri:
+ secretName:
type: string
- description: Authorization server token endpoint URI.
- type:
+ description: The name of the Secret containing the certificate.
+ certificate:
type: string
- enum:
- - tls
- - scram-sha-256
- - scram-sha-512
- - plain
- - oauth
- description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
- username:
+ description: The name of the file certificate in the secret.
+ pattern:
type: string
- description: Username used for the authentication.
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
required:
- - type
- description: Authentication configuration for connecting to the cluster.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The MirrorMaker 2 cluster config. Properties with the following prefixes cannot be set: ssl., sasl., security., listeners, plugin.path, rest., bootstrap.servers, consumer.interceptor.classes, producer.interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
- required:
- - alias
- - bootstrapServers
- description: Kafka clusters for mirroring.
- mirrors:
- type: array
- items:
- type: object
- properties:
- sourceCluster:
- type: string
- description: The alias of the source cluster used by the Kafka MirrorMaker 2 connectors. The alias must match a cluster in the list at `spec.clusters`.
- targetCluster:
+ - secretName
+ description: Trusted certificates for TLS connection.
+ description: TLS configuration.
+ authentication:
+ type: object
+ properties:
+ accessToken:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the access token which was obtained from the authorization server.
+ accessTokenIsJwt:
+ type: boolean
+ description: Configure whether access token should be treated as JWT. This should be set to `false` if the authorization server returns opaque tokens. Defaults to `true`.
+ accessTokenLocation:
+ type: string
+ description: Path to the token file containing an access token to be used for authentication.
+ audience:
+ type: string
+ description: "OAuth audience to use when authenticating against the authorization server. Some authorization servers require the audience to be explicitly set. The possible values depend on how the authorization server is configured. By default, `audience` is not specified when performing the token endpoint request."
+ certificateAndKey:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the certificate.
+ certificate:
+ type: string
+ description: The name of the file certificate in the Secret.
+ key:
+ type: string
+ description: The name of the private key in the Secret.
+ required:
+ - secretName
+ - certificate
+ - key
+ description: Reference to the `Secret` which holds the certificate and private key pair.
+ clientAssertion:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes secret containing the client assertion which was manually configured for the client.
+ clientAssertionLocation:
+ type: string
+ description: Path to the file containing the client assertion to be used for authentication.
+ clientAssertionType:
+ type: string
+ description: "The client assertion type. If not set, and either `clientAssertion` or `clientAssertionLocation` is configured, this value defaults to `urn:ietf:params:oauth:client-assertion-type:jwt-bearer`."
+ clientId:
+ type: string
+ description: OAuth Client ID which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ clientSecret:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the OAuth client secret which the Kafka client can use to authenticate against the OAuth server and use the token endpoint URI.
+ connectTimeoutSeconds:
+ type: integer
+ description: "The connect timeout in seconds when connecting to authorization server. If not set, the effective connect timeout is 60 seconds."
+ disableTlsHostnameVerification:
+ type: boolean
+ description: Enable or disable TLS hostname verification. Default value is `false`.
+ enableMetrics:
+ type: boolean
+ description: Enable or disable OAuth metrics. Default value is `false`.
+ httpRetries:
+ type: integer
+ description: "The maximum number of retries to attempt if an initial HTTP request fails. If not set, the default is to not attempt any retries."
+ httpRetryPauseMs:
+ type: integer
+ description: "The pause to take before retrying a failed HTTP request. If not set, the default is to not pause at all but to immediately repeat a request."
+ includeAcceptHeader:
+ type: boolean
+ description: Whether the Accept header should be set in requests to the authorization servers. The default value is `true`.
+ maxTokenExpirySeconds:
+ type: integer
+ description: Set or limit time-to-live of the access tokens to the specified number of seconds. This should be set if the authorization server returns opaque tokens.
+ passwordSecret:
+ type: object
+ properties:
+ secretName:
+ type: string
+ description: The name of the Secret containing the password.
+ password:
+ type: string
+ description: The name of the key in the Secret under which the password is stored.
+ required:
+ - secretName
+ - password
+ description: Reference to the `Secret` which holds the password.
+ readTimeoutSeconds:
+ type: integer
+ description: "The read timeout in seconds when connecting to authorization server. If not set, the effective read timeout is 60 seconds."
+ refreshToken:
+ type: object
+ properties:
+ key:
+ type: string
+ description: The key under which the secret value is stored in the Kubernetes Secret.
+ secretName:
+ type: string
+ description: The name of the Kubernetes Secret containing the secret value.
+ required:
+ - key
+ - secretName
+ description: Link to Kubernetes Secret containing the refresh token which can be used to obtain access token from the authorization server.
+ saslExtensions:
+ additionalProperties:
type: string
- description: The alias of the target cluster used by the Kafka MirrorMaker 2 connectors. The alias must match a cluster in the list at `spec.clusters`.
- sourceConnector:
+ type: object
+ description: SASL extensions parameters.
+ scope:
+ type: string
+ description: OAuth scope to use when authenticating against the authorization server. Some authorization servers require this to be set. The possible values depend on how authorization server is configured. By default `scope` is not specified when doing the token endpoint request.
+ tlsTrustedCertificates:
+ type: array
+ items:
type: object
properties:
- tasksMax:
- type: integer
- minimum: 1
- description: The maximum number of tasks for the Kafka Connector.
- pause:
- type: boolean
- description: Whether the connector should be paused. Defaults to false.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
- state:
+ secretName:
type: string
- enum:
- - paused
- - stopped
- - running
- description: The state the connector should be in. Defaults to running.
- autoRestart:
- type: object
- properties:
- enabled:
- type: boolean
- description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
- maxRestarts:
- type: integer
- description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
- description: Automatic restart of connector and tasks configuration.
- description: The specification of the Kafka MirrorMaker 2 source connector.
- heartbeatConnector:
- type: object
- properties:
- tasksMax:
- type: integer
- minimum: 1
- description: The maximum number of tasks for the Kafka Connector.
- pause:
- type: boolean
- description: Whether the connector should be paused. Defaults to false.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
- state:
+ description: The name of the Secret containing the certificate.
+ certificate:
type: string
- enum:
- - paused
- - stopped
- - running
- description: The state the connector should be in. Defaults to running.
- autoRestart:
- type: object
- properties:
- enabled:
- type: boolean
- description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
- maxRestarts:
- type: integer
- description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
- description: Automatic restart of connector and tasks configuration.
- description: The specification of the Kafka MirrorMaker 2 heartbeat connector.
- checkpointConnector:
- type: object
- properties:
- tasksMax:
- type: integer
- minimum: 1
- description: The maximum number of tasks for the Kafka Connector.
- pause:
- type: boolean
- description: Whether the connector should be paused. Defaults to false.
- config:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "The Kafka Connector configuration. The following properties cannot be set: name, connector.class, tasks.max."
- state:
+ description: The name of the certificate file in the secret.
+ pattern:
type: string
- enum:
- - paused
- - stopped
- - running
- description: The state the connector should be in. Defaults to running.
- autoRestart:
- type: object
- properties:
- enabled:
- type: boolean
- description: Whether automatic restart for failed connectors and tasks should be enabled or disabled.
- maxRestarts:
- type: integer
- description: "The maximum number of connector restarts that the operator will try. If the connector remains in a failed state after reaching this limit, it must be restarted manually by the user. Defaults to an unlimited number of restarts."
- description: Automatic restart of connector and tasks configuration.
- description: The specification of the Kafka MirrorMaker 2 checkpoint connector.
- topicsPattern:
- type: string
- description: "A regular expression matching the topics to be mirrored, for example, \"topic1\\|topic2\\|topic3\". Comma-separated lists are also supported."
- topicsBlacklistPattern:
- type: string
- description: A regular expression matching the topics to exclude from mirroring. Comma-separated lists are also supported.
- topicsExcludePattern:
- type: string
- description: A regular expression matching the topics to exclude from mirroring. Comma-separated lists are also supported.
- groupsPattern:
- type: string
- description: A regular expression matching the consumer groups to be mirrored. Comma-separated lists are also supported.
- groupsBlacklistPattern:
- type: string
- description: A regular expression matching the consumer groups to exclude from mirroring. Comma-separated lists are also supported.
- groupsExcludePattern:
- type: string
- description: A regular expression matching the consumer groups to exclude from mirroring. Comma-separated lists are also supported.
- required:
- - sourceCluster
- - targetCluster
- description: Configuration of the MirrorMaker 2 connectors.
+ description: "Pattern for the certificate files in the secret. Use the link:https://en.wikipedia.org/wiki/Glob_(programming)[_glob syntax_] for the pattern. All files in the secret that match the pattern are used."
+ oneOf:
+ - properties:
+ certificate: {}
+ required:
+ - certificate
+ - properties:
+ pattern: {}
+ required:
+ - pattern
+ required:
+ - secretName
+ description: Trusted certificates for TLS connection to the OAuth server.
+ tokenEndpointUri:
+ type: string
+ description: Authorization server token endpoint URI.
+ type:
+ type: string
+ enum:
+ - tls
+ - scram-sha-256
+ - scram-sha-512
+ - plain
+ - oauth
+ description: "Authentication type. Currently the supported types are `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and 'oauth'. `scram-sha-256` and `scram-sha-512` types use SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 Authentication, respectively. `plain` type uses SASL PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER Authentication. The `tls` type uses TLS Client Authentication. The `tls` type is supported only over TLS connections."
+ username:
+ type: string
+ description: Username used for the authentication.
+ required:
+ - type
+ description: Authentication configuration for Kafka Connect.
+ config:
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ description: "The Kafka Connect configuration. Properties with the following prefixes cannot be set: ssl., sasl., security., listeners, plugin.path, rest., bootstrap.servers, consumer.interceptor.classes, producer.interceptor.classes (with the exception of: ssl.endpoint.identification.algorithm, ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols)."
resources:
type: object
properties:
@@ -16294,7 +17620,86 @@ spec:
tmpDirSizeLimit:
type: string
pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
description: Template for Kafka Connect `Pods`.
apiService:
type: object
@@ -16442,6 +17847,26 @@ spec:
runAsUserName:
type: string
description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
description: Template for the Kafka Connect container.
initContainer:
type: object
@@ -16523,6 +17948,26 @@ spec:
runAsUserName:
type: string
description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
description: Template for the Kafka init container.
podDisruptionBudget:
type: object
@@ -17085,7 +18530,86 @@ spec:
tmpDirSizeLimit:
type: string
pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
- description: Defines the total amount (for example `1Gi`) of local storage required for temporary EmptyDir volume (`/tmp`). Default value is `5Mi`.
+ description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources."
+ volumes:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: Name to use for the volume. Required.
+ secret:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ optional:
+ type: boolean
+ secretName:
+ type: string
+ description: Secret to use to populate the volume.
+ configMap:
+ type: object
+ properties:
+ defaultMode:
+ type: integer
+ items:
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ type: string
+ mode:
+ type: integer
+ path:
+ type: string
+ name:
+ type: string
+ optional:
+ type: boolean
+ description: ConfigMap to use to populate the volume.
+ emptyDir:
+ type: object
+ properties:
+ medium:
+ type: string
+ sizeLimit:
+ type: object
+ properties:
+ amount:
+ type: string
+ format:
+ type: string
+ description: EmptyDir to use to populate the volume.
+ persistentVolumeClaim:
+ type: object
+ properties:
+ claimName:
+ type: string
+ readOnly:
+ type: boolean
+ description: PersistentVolumeClaim object to use to populate the volume.
+ oneOf:
+ - properties:
+ secret: {}
+ configMap: {}
+ emptyDir: {}
+ persistentVolumeClaim: {}
+ required: []
+ description: Additional volumes that can be mounted to the pod.
description: Template for Kafka Connect Build `Pods`. The build pod is used only on Kubernetes.
buildContainer:
type: object
@@ -17167,6 +18691,26 @@ spec:
runAsUserName:
type: string
description: Security context for the container.
+ volumeMounts:
+ type: array
+ items:
+ type: object
+ properties:
+ mountPath:
+ type: string
+ mountPropagation:
+ type: string
+ name:
+ type: string
+ readOnly:
+ type: boolean
+ recursiveReadOnly:
+ type: string
+ subPath:
+ type: string
+ subPathExpr:
+ type: string
+ description: Additional volume mounts which should be applied to the container.
description: Template for the Kafka Connect Build container. The build container is used only on Kubernetes.
buildConfig:
type: object
@@ -17225,7 +18769,7 @@ spec:
description: Annotations added to the Kubernetes resource.
description: Metadata applied to the resource.
description: Template for Secret of the Kafka Connect Cluster JMX authentication.
- description: "Template for Kafka Connect and Kafka Mirror Maker 2 resources. The template allows users to specify how the `Pods`, `Service`, and other services are generated."
+ description: "Template for Kafka Connect and Kafka MirrorMaker 2 resources. The template allows users to specify how the `Pods`, `Service`, and other services are generated."
externalConfiguration:
type: object
properties:
@@ -17319,9 +18863,122 @@ spec:
- name
description: Makes data from a Secret or ConfigMap available in the Kafka Connect pods as volumes.
description: Pass data from Secrets or ConfigMaps to the Kafka Connect pods and use them to configure connectors.
+ build:
+ type: object
+ properties:
+ output:
+ type: object
+ properties:
+ additionalKanikoOptions:
+ type: array
+ items:
+ type: string
+ description: "Configures additional options which will be passed to the Kaniko executor when building the new Connect image. Allowed options are: --customPlatform, --insecure, --insecure-pull, --insecure-registry, --log-format, --log-timestamp, --registry-mirror, --reproducible, --single-snapshot, --skip-tls-verify, --skip-tls-verify-pull, --skip-tls-verify-registry, --verbosity, --snapshotMode, --use-new-run. These options will be used only on Kubernetes where the Kaniko executor is used. They will be ignored on OpenShift. The options are described in the link:https://github.com/GoogleContainerTools/kaniko[Kaniko GitHub repository^]. Changing this field does not trigger new build of the Kafka Connect image."
+ image:
+ type: string
+ description: The name of the image which will be built. Required.
+ pushSecret:
+ type: string
+ description: Container Registry Secret with the credentials for pushing the newly built image.
+ type:
+ type: string
+ enum:
+ - docker
+ - imagestream
+ description: Output type. Must be either `docker` for pushing the newly built image to a Docker compatible registry or `imagestream` for pushing the image to an OpenShift ImageStream. Required.
+ required:
+ - image
+ - type
+ description: Configures where the newly built image should be stored. Required.
+ plugins:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ pattern: "^[a-z0-9][-_a-z0-9]*[a-z0-9]$"
+ description: "The unique name of the connector plugin. Will be used to generate the path where the connector artifacts will be stored. The name has to be unique within the KafkaConnect resource. The name has to follow the following pattern: `^[a-z][-_a-z0-9]*[a-z]$`. Required."
+ artifacts:
+ type: array
+ items:
+ type: object
+ properties:
+ artifact:
+ type: string
+ description: Maven artifact id. Applicable to the `maven` artifact type only.
+ fileName:
+ type: string
+ description: Name under which the artifact will be stored.
+ group:
+ type: string
+ description: Maven group id. Applicable to the `maven` artifact type only.
+ insecure:
+ type: boolean
+ description: "By default, connections using TLS are verified to check they are secure. The server certificate used must be valid, trusted, and contain the server name. By setting this option to `true`, all TLS verification is disabled and the artifact will be downloaded, even when the server is considered insecure."
+ repository:
+ type: string
+ description: Maven repository to download the artifact from. Applicable to the `maven` artifact type only.
+ sha512sum:
+ type: string
+ description: "SHA512 checksum of the artifact. Optional. If specified, the checksum will be verified while building the new container. If not specified, the downloaded artifact will not be verified. Not applicable to the `maven` artifact type. "
+ type:
+ type: string
+ enum:
+ - jar
+ - tgz
+ - zip
+ - maven
+ - other
+ description: "Artifact type. Currently, the supported artifact types are `tgz`, `jar`, `zip`, `other` and `maven`."
+ url:
+ type: string
+ pattern: "^(https?|ftp)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$"
+ description: "URL of the artifact which will be downloaded. Strimzi does not do any security scanning of the downloaded artifacts. For security reasons, you should first verify the artifacts manually and configure the checksum verification to make sure the same artifact is used in the automated build. Required for `jar`, `zip`, `tgz` and `other` artifacts. Not applicable to the `maven` artifact type."
+ version:
+ type: string
+ description: Maven version number. Applicable to the `maven` artifact type only.
+ required:
+ - type
+ description: List of artifacts which belong to this connector plugin. Required.
+ required:
+ - name
+ - artifacts
+ description: List of connector plugins which should be added to Kafka Connect. Required.
+ resources:
+ type: object
+ properties:
+ claims:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$"
+ x-kubernetes-int-or-string: true
+ type: object
+ description: CPU and memory resources to reserve for the build.
+ required:
+ - output
+ - plugins
+ description: Configures how the Connect container image should be built. Optional.
required:
- - connectCluster
- description: The specification of the Kafka MirrorMaker 2 cluster.
+ - bootstrapServers
+ description: The specification of the Kafka Connect cluster.
status:
type: object
properties:
@@ -17352,27 +19009,6 @@ spec:
url:
type: string
description: The URL of the REST API endpoint for managing and monitoring Kafka Connect connectors.
- connectors:
- type: array
- items:
- x-kubernetes-preserve-unknown-fields: true
- type: object
- description: "List of MirrorMaker 2 connector statuses, as reported by the Kafka Connect REST API."
- autoRestartStatuses:
- type: array
- items:
- type: object
- properties:
- count:
- type: integer
- description: The number of times the connector or task is restarted.
- connectorName:
- type: string
- description: The name of the connector being restarted.
- lastRestartTimestamp:
- type: string
- description: The last time the automatic restart was attempted. The required format is 'yyyy-MM-ddTHH:mm:ssZ' in the UTC time zone.
- description: List of MirrorMaker 2 connector auto restart statuses.
connectorPlugins:
type: array
items:
@@ -17388,12 +19024,12 @@ spec:
type: string
description: The version of the connector plugin.
description: The list of connector plugins available in this Kafka Connect deployment.
- labelSelector:
- type: string
- description: Label selector for pods providing this resource.
replicas:
type: integer
description: The current number of pods being used to provide this resource.
- description: The status of the Kafka MirrorMaker 2 cluster.
+ labelSelector:
+ type: string
+ description: Label selector for pods providing this resource.
+ description: The status of the Kafka Connect cluster.
---
diff --git a/test/rekt/features/kafka_source.go b/test/rekt/features/kafka_source.go
index 7d36136970..a6fb4dc479 100644
--- a/test/rekt/features/kafka_source.go
+++ b/test/rekt/features/kafka_source.go
@@ -22,8 +22,6 @@ import (
"fmt"
"strings"
- "knative.dev/eventing/test/rekt/features/featureflags"
-
cloudevents "github.com/cloudevents/sdk-go/v2"
. "github.com/cloudevents/sdk-go/v2/test"
cetest "github.com/cloudevents/sdk-go/v2/test"
@@ -32,6 +30,7 @@ import (
"go.uber.org/zap"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/eventing/pkg/eventingtls/eventingtlstesting"
+ "knative.dev/eventing/test/rekt/features/featureflags"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
kubeclient "knative.dev/pkg/client/injection/kube/client"
@@ -45,11 +44,10 @@ import (
"knative.dev/reconciler-test/pkg/manifest"
"knative.dev/reconciler-test/pkg/resources/service"
- "knative.dev/eventing/test/rekt/features/source"
-
testpkg "knative.dev/eventing-kafka-broker/test/pkg"
"knative.dev/eventing-kafka-broker/test/rekt/features/featuressteps"
"knative.dev/eventing-kafka-broker/test/rekt/resources/kafkasink"
+ "knative.dev/eventing/test/rekt/features/source"
internalscg "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1"
sources "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1"
@@ -287,13 +285,9 @@ type kafkaSinkConfig struct {
opts []manifest.CfgFn
}
-func kafkaSourceFeature(name string,
+func KafkaSourceFeatureSetup(f *feature.Feature,
kafkaSourceCfg kafkaSourceConfig,
- kafkaSinkCfg kafkaSinkConfig,
- senderOpts []eventshub.EventsHubOption,
- matcher cetest.EventMatcher) *feature.Feature {
-
- f := feature.NewFeatureNamed(name)
+ kafkaSinkCfg kafkaSinkConfig) (string, string) {
if kafkaSourceCfg.topic == "" {
kafkaSourceCfg.topic = feature.MakeRandomK8sName("topic")
@@ -308,7 +302,7 @@ func kafkaSourceFeature(name string,
}
receiver := feature.MakeRandomK8sName("eventshub-receiver")
- sender := feature.MakeRandomK8sName("eventshub-sender")
+
secretName := feature.MakeRandomK8sName("secret")
f.Setup("install kafka topic", kafkatopic.Install(kafkaSourceCfg.topic))
@@ -355,15 +349,22 @@ func kafkaSourceFeature(name string,
f.Setup("install kafka source", kafkasource.Install(kafkaSourceCfg.sourceName, kafkaSourceOpts...))
f.Setup("kafka source is ready", kafkasource.IsReady(kafkaSourceCfg.sourceName))
+ return kafkaSinkCfg.sinkName, receiver
+}
+
+func KafkaSourceFeatureAssert(f *feature.Feature, kafkaSink, receiver string, customizeFunc CustomizeEventFunc) {
+ sender := feature.MakeRandomK8sName("eventshub-sender")
options := []eventshub.EventsHubOption{
- eventshub.StartSenderToResource(kafkasink.GVR(), kafkaSinkCfg.sinkName),
+ eventshub.StartSenderToResource(kafkasink.GVR(), kafkaSink),
}
+
+ senderOpts, matcher := customizeFunc()
+
options = append(options, senderOpts...)
+
f.Requirement("install eventshub sender", eventshub.Install(sender, options...))
f.Assert("eventshub receiver gets event", matchEvent(receiver, matcher))
-
- return f
}
func matchEvent(sink string, matcher EventMatcher) feature.StepFn {
@@ -372,63 +373,86 @@ func matchEvent(sink string, matcher EventMatcher) feature.StepFn {
}
}
-func KafkaSourceBinaryEvent() *feature.Feature {
- senderOptions := []eventshub.EventsHubOption{
- eventshub.InputHeader("ce-specversion", cloudevents.VersionV1),
- eventshub.InputHeader("ce-type", "com.github.pull.create"),
- eventshub.InputHeader("ce-source", "github.com/cloudevents/spec/pull"),
- eventshub.InputHeader("ce-subject", "123"),
- eventshub.InputHeader("ce-id", "A234-1234-1234"),
- eventshub.InputHeader("content-type", "application/json"),
- eventshub.InputHeader("ce-comexampleextension1", "value"),
- eventshub.InputHeader("ce-comexampleothervalue", "5"),
- eventshub.InputBody(marshalJSON(map[string]string{
- "hello": "Francesco",
- })),
+// CustomizeEventFunc creates a pair of eventshub options that customize the event
+// and corresponding event matcher that will match the respective event.
+type CustomizeEventFunc func() ([]eventshub.EventsHubOption, EventMatcher)
+
+func KafkaSourceBinaryEventCustomizeFunc() CustomizeEventFunc {
+ return func() ([]eventshub.EventsHubOption, EventMatcher) {
+ id := feature.MakeRandomK8sName("id")
+ senderOptions := []eventshub.EventsHubOption{
+ eventshub.InputHeader("ce-specversion", cloudevents.VersionV1),
+ eventshub.InputHeader("ce-type", "com.github.pull.create"),
+ eventshub.InputHeader("ce-source", "github.com/cloudevents/spec/pull"),
+ eventshub.InputHeader("ce-subject", "123"),
+ eventshub.InputHeader("ce-id", id),
+ eventshub.InputHeader("content-type", "application/json"),
+ eventshub.InputHeader("ce-comexampleextension1", "value"),
+ eventshub.InputHeader("ce-comexampleothervalue", "5"),
+ eventshub.InputBody(marshalJSON(map[string]string{
+ "hello": "Francesco",
+ })),
+ }
+ matcher := AllOf(
+ HasSpecVersion(cloudevents.VersionV1),
+ HasType("com.github.pull.create"),
+ HasSource("github.com/cloudevents/spec/pull"),
+ HasSubject("123"),
+ HasId(id),
+ HasDataContentType("application/json"),
+ HasData([]byte(`{"hello":"Francesco"}`)),
+ HasExtension("comexampleextension1", "value"),
+ HasExtension("comexampleothervalue", "5"),
+ )
+ return senderOptions, matcher
}
- matcher := AllOf(
- HasSpecVersion(cloudevents.VersionV1),
- HasType("com.github.pull.create"),
- HasSource("github.com/cloudevents/spec/pull"),
- HasSubject("123"),
- HasId("A234-1234-1234"),
- HasDataContentType("application/json"),
- HasData([]byte(`{"hello":"Francesco"}`)),
- HasExtension("comexampleextension1", "value"),
- HasExtension("comexampleothervalue", "5"),
- )
+}
- return kafkaSourceFeature("KafkaSourceBinaryEvent",
+func KafkaSourceBinaryEvent() *feature.Feature {
+ f := feature.NewFeatureNamed("KafkaSourceBinaryEvent")
+
+ kafkaSink, receiver := KafkaSourceBinaryEventFeatureSetup(f)
+ KafkaSourceFeatureAssert(f, kafkaSink, receiver, KafkaSourceBinaryEventCustomizeFunc())
+
+ return f
+}
+
+func KafkaSourceBinaryEventFeatureSetup(f *feature.Feature) (string, string) {
+ return KafkaSourceFeatureSetup(f,
kafkaSourceConfig{
authMech: PlainMech,
},
kafkaSinkConfig{},
- senderOptions,
- matcher,
)
}
func KafkaSourceBinaryEventWithExtensions() *feature.Feature {
- senderOptions := []eventshub.EventsHubOption{
- eventshub.InputHeader("ce-specversion", cloudevents.VersionV1),
- eventshub.InputHeader("ce-type", "com.github.pull.create"),
- eventshub.InputHeader("ce-source", "github.com/cloudevents/spec/pull"),
- eventshub.InputHeader("ce-subject", "123"),
- eventshub.InputHeader("ce-id", "A234-1234-1234"),
- eventshub.InputHeader("content-type", "application/json"),
+ customizeFunc := func() ([]eventshub.EventsHubOption, EventMatcher) {
+ id := feature.MakeRandomK8sName("id")
+ senderOptions := []eventshub.EventsHubOption{
+ eventshub.InputHeader("ce-specversion", cloudevents.VersionV1),
+ eventshub.InputHeader("ce-type", "com.github.pull.create"),
+ eventshub.InputHeader("ce-source", "github.com/cloudevents/spec/pull"),
+ eventshub.InputHeader("ce-subject", "123"),
+ eventshub.InputHeader("ce-id", id),
+ eventshub.InputHeader("content-type", "application/json"),
+ }
+ matcher := AllOf(
+ HasSpecVersion(cloudevents.VersionV1),
+ HasType("com.github.pull.create"),
+ HasSource("github.com/cloudevents/spec/pull"),
+ HasSubject("123"),
+ HasId(id),
+ HasDataContentType("application/json"),
+ HasExtension("comexampleextension1", "value"),
+ HasExtension("comexampleothervalue", "5"),
+ )
+ return senderOptions, matcher
}
- matcher := AllOf(
- HasSpecVersion(cloudevents.VersionV1),
- HasType("com.github.pull.create"),
- HasSource("github.com/cloudevents/spec/pull"),
- HasSubject("123"),
- HasId("A234-1234-1234"),
- HasDataContentType("application/json"),
- HasExtension("comexampleextension1", "value"),
- HasExtension("comexampleothervalue", "5"),
- )
- return kafkaSourceFeature("KafkaSourceBinaryEvent",
+ f := feature.NewFeatureNamed("KafkaSourceBinaryEventWithExtensions")
+
+ kafkaSink, receiver := KafkaSourceFeatureSetup(f,
kafkaSourceConfig{
authMech: PlainMech,
opts: []manifest.CfgFn{
@@ -438,74 +462,93 @@ func KafkaSourceBinaryEventWithExtensions() *feature.Feature {
})},
},
kafkaSinkConfig{},
- senderOptions,
- matcher,
)
+ KafkaSourceFeatureAssert(f, kafkaSink, receiver, customizeFunc)
+
+ return f
}
-func KafkaSourceStructuredEvent() *feature.Feature {
- eventTime, _ := cetypes.ParseTime("2018-04-05T17:31:00Z")
- senderOptions := []eventshub.EventsHubOption{
- eventshub.InputHeader("content-type", "application/cloudevents+json"),
- eventshub.InputBody(marshalJSON(map[string]interface{}{
- "specversion": cloudevents.VersionV1,
- "type": "com.github.pull.create",
- "source": "https://github.com/cloudevents/spec/pull",
- "subject": "123",
- "id": "A234-1234-1234",
- "time": "2018-04-05T17:31:00Z",
- "datacontenttype": "application/json",
- "data": map[string]string{
- "hello": "Francesco",
- },
- "comexampleextension1": "value",
- "comexampleothervalue": 5,
- })),
+func KafkaSourceStructuredEventCustomizeFunc() CustomizeEventFunc {
+ return func() ([]eventshub.EventsHubOption, EventMatcher) {
+ id := feature.MakeRandomK8sName("id")
+ eventTime, _ := cetypes.ParseTime("2018-04-05T17:31:00Z")
+ senderOptions := []eventshub.EventsHubOption{
+ eventshub.InputHeader("content-type", "application/cloudevents+json"),
+ eventshub.InputBody(marshalJSON(map[string]interface{}{
+ "specversion": cloudevents.VersionV1,
+ "type": "com.github.pull.create",
+ "source": "https://github.com/cloudevents/spec/pull",
+ "subject": "123",
+ "id": id,
+ "time": "2018-04-05T17:31:00Z",
+ "datacontenttype": "application/json",
+ "data": map[string]string{
+ "hello": "Francesco",
+ },
+ "comexampleextension1": "value",
+ "comexampleothervalue": 5,
+ })),
+ }
+ matcher := AllOf(
+ HasSpecVersion(cloudevents.VersionV1),
+ HasType("com.github.pull.create"),
+ HasSource("https://github.com/cloudevents/spec/pull"),
+ HasSubject("123"),
+ HasId(id),
+ HasTime(eventTime),
+ HasDataContentType("application/json"),
+ HasData([]byte(`{"hello":"Francesco"}`)),
+ HasExtension("comexampleextension1", "value"),
+ HasExtension("comexampleothervalue", "5"),
+ )
+ return senderOptions, matcher
}
- matcher := AllOf(
- HasSpecVersion(cloudevents.VersionV1),
- HasType("com.github.pull.create"),
- HasSource("https://github.com/cloudevents/spec/pull"),
- HasSubject("123"),
- HasId("A234-1234-1234"),
- HasTime(eventTime),
- HasDataContentType("application/json"),
- HasData([]byte(`{"hello":"Francesco"}`)),
- HasExtension("comexampleextension1", "value"),
- HasExtension("comexampleothervalue", "5"),
- )
+}
+
+func KafkaSourceStructuredEvent() *feature.Feature {
+ f := feature.NewFeatureNamed("KafkaSourceStructuredEvent")
+
+ kafkaSink, receiver := KafkaSourceStructuredEventFeatureSetup(f)
+ KafkaSourceFeatureAssert(f, kafkaSink, receiver, KafkaSourceStructuredEventCustomizeFunc())
- return kafkaSourceFeature("KafkaSourceStructuredEvent",
+ return f
+}
+
+func KafkaSourceStructuredEventFeatureSetup(f *feature.Feature) (string, string) {
+ return KafkaSourceFeatureSetup(f,
kafkaSourceConfig{
authMech: PlainMech,
},
- kafkaSinkConfig{
- opts: []manifest.CfgFn{kafkasink.WithContentMode("structured")},
- },
- senderOptions,
- matcher,
+ kafkaSinkConfig{},
)
}
func KafkaSourceWithExtensions() *feature.Feature {
- senderOptions := []eventshub.EventsHubOption{
- eventshub.InputHeader("content-type", "application/cloudevents+json"),
- eventshub.InputBody(marshalJSON(map[string]interface{}{
- "specversion": cloudevents.VersionV1,
- "type": "com.github.pull.create",
- "source": "https://github.com/cloudevents/spec/pull",
- "id": "A234-1234-1234",
- })),
+ customizeFunc := func() ([]eventshub.EventsHubOption, EventMatcher) {
+ id := feature.MakeRandomK8sName("id")
+ senderOptions := []eventshub.EventsHubOption{
+ eventshub.InputHeader("content-type", "application/cloudevents+json"),
+ eventshub.InputBody(marshalJSON(map[string]interface{}{
+ "specversion": cloudevents.VersionV1,
+ "type": "com.github.pull.create",
+ "source": "https://github.com/cloudevents/spec/pull",
+ "id": id,
+ })),
+ }
+ matcher := AllOf(
+ HasSpecVersion(cloudevents.VersionV1),
+ HasId(id),
+ HasType("com.github.pull.create"),
+ HasSource("https://github.com/cloudevents/spec/pull"),
+ HasExtension("comexampleextension1", "value"),
+ HasExtension("comexampleothervalue", "5"),
+ )
+ return senderOptions, matcher
}
- matcher := AllOf(
- HasSpecVersion(cloudevents.VersionV1),
- HasType("com.github.pull.create"),
- HasSource("https://github.com/cloudevents/spec/pull"),
- HasExtension("comexampleextension1", "value"),
- HasExtension("comexampleothervalue", "5"),
- )
- return kafkaSourceFeature("KafkaSourceWithExtensions",
+ f := feature.NewFeatureNamed("KafkaSourceWithExtensions")
+
+ kafkaSink, receiver := KafkaSourceFeatureSetup(f,
kafkaSourceConfig{
authMech: PlainMech,
opts: []manifest.CfgFn{
@@ -517,19 +560,30 @@ func KafkaSourceWithExtensions() *feature.Feature {
kafkaSinkConfig{
opts: []manifest.CfgFn{kafkasink.WithContentMode("structured")},
},
- senderOptions,
- matcher,
)
+ KafkaSourceFeatureAssert(f, kafkaSink, receiver, customizeFunc)
+
+ return f
}
func KafkaSourceTLS(kafkaSource, kafkaSink, topic string) *feature.Feature {
- e := cetest.FullEvent()
- senderOptions := []eventshub.EventsHubOption{
- eventshub.InputEvent(e),
+ customizeFunc := func() ([]eventshub.EventsHubOption, EventMatcher) {
+ id := feature.MakeRandomK8sName("id")
+ e := cetest.FullEvent()
+ e.SetID(id)
+ senderOptions := []eventshub.EventsHubOption{
+ eventshub.InputEvent(e),
+ }
+ matcher := AllOf(
+ HasData(e.Data()),
+ HasId(id),
+ )
+ return senderOptions, matcher
}
- matcher := HasData(e.Data())
- return kafkaSourceFeature("KafkaSourceTLS",
+ f := feature.NewFeatureNamed("KafkaSourceTLS")
+
+ kafkaSink, receiver := KafkaSourceFeatureSetup(f,
kafkaSourceConfig{
authMech: TLSMech,
topic: topic,
@@ -539,9 +593,10 @@ func KafkaSourceTLS(kafkaSource, kafkaSink, topic string) *feature.Feature {
kafkaSinkConfig{
sinkName: kafkaSink,
},
- senderOptions,
- matcher,
)
+ KafkaSourceFeatureAssert(f, kafkaSink, receiver, customizeFunc)
+
+ return f
}
func KafkaSourceTLSSink() *feature.Feature {
@@ -652,20 +707,31 @@ func KafkaSourceTLSSinkTrustBundle() *feature.Feature {
}
func KafkaSourceSASL() *feature.Feature {
- e := cetest.FullEvent()
- senderOptions := []eventshub.EventsHubOption{
- eventshub.InputEvent(e),
+ customizeFunc := func() ([]eventshub.EventsHubOption, EventMatcher) {
+ id := feature.MakeRandomK8sName("id")
+ e := cetest.FullEvent()
+ e.SetID(id)
+ senderOptions := []eventshub.EventsHubOption{
+ eventshub.InputEvent(e),
+ }
+ matcher := AllOf(
+ HasData(e.Data()),
+ HasId(id),
+ )
+ return senderOptions, matcher
}
- matcher := HasData(e.Data())
- return kafkaSourceFeature("KafkaSourceSASL",
+ f := feature.NewFeatureNamed("KafkaSourceSASL")
+
+ kafkaSink, receiver := KafkaSourceFeatureSetup(f,
kafkaSourceConfig{
authMech: SASLMech,
},
kafkaSinkConfig{},
- senderOptions,
- matcher,
)
+ KafkaSourceFeatureAssert(f, kafkaSink, receiver, customizeFunc)
+
+ return f
}
func marshalJSON(val interface{}) string {
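
Note: the refactor above splits the old kafkaSourceFeature helper into KafkaSourceFeatureSetup, KafkaSourceFeatureAssert, and per-event CustomizeEventFunc factories, so a feature is now assembled from those three pieces. A minimal sketch of a new feature composed from the helpers in this file; the feature name KafkaSourcePlainEventExample is illustrative only, not part of this change:

func KafkaSourcePlainEventExample() *feature.Feature {
	f := feature.NewFeatureNamed("KafkaSourcePlainEventExample")

	// Setup installs the topic, secret, KafkaSink, eventshub receiver, and KafkaSource,
	// returning the sink and receiver names needed by the assert phase.
	kafkaSink, receiver := KafkaSourceFeatureSetup(f,
		kafkaSourceConfig{authMech: PlainMech},
		kafkaSinkConfig{},
	)

	// Assert installs a sender with freshly generated event options and checks that
	// the receiver observed a matching event.
	KafkaSourceFeatureAssert(f, kafkaSink, receiver, KafkaSourceBinaryEventCustomizeFunc())

	return f
}
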
diff --git a/test/upgrade/postdowngrade.go b/test/upgrade/postdowngrade.go
deleted file mode 100644
index 834ccae17a..0000000000
--- a/test/upgrade/postdowngrade.go
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2021 The Knative Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package upgrade
-
-import (
- pkgupgrade "knative.dev/pkg/test/upgrade"
- "knative.dev/reconciler-test/pkg/environment"
-
- eventing "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1"
- "knative.dev/eventing-kafka-broker/control-plane/pkg/kafka"
- "knative.dev/eventing-kafka-broker/test/e2e_sink"
-)
-
-// BrokerPostDowngradeTest tests channel basic channel operations after
-// downgrade.
-func BrokerPostDowngradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("BrokerPostDowngradeTest", func(c pkgupgrade.Context) {
- runBrokerSmokeTest(c.T, kafka.BrokerClass)
- })
-}
-
-// NamespacedBrokerPostDowngradeTest tests basic namespaced broker operations after
-// downgrade.
-func NamespacedBrokerPostDowngradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("NamespacedBrokerPostDowngradeTest", func(c pkgupgrade.Context) {
- runBrokerSmokeTest(c.T, kafka.NamespacedBrokerClass)
- })
-}
-
-// ChannelPostDowngradeTest tests channel basic channel operations after
-// downgrade.
-func ChannelPostDowngradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("ChannelPostDowngradeTest",
- func(c pkgupgrade.Context) {
- runChannelSmokeTest(c.T)
- })
-}
-
-// SinkPostDowngradeTest tests sink basic operations after downgrade.
-func SinkPostDowngradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("SinkPostDowngradeTest", func(c pkgupgrade.Context) {
- e2e_sink.RunTestKafkaSink(c.T, eventing.ModeBinary, nil)
- })
-}
-
-// SourcePostDowngradeTest tests source operations after downgrade.
-func SourcePostDowngradeTest(glob environment.GlobalEnvironment) pkgupgrade.Operation {
- return pkgupgrade.NewOperation("SourcePostDowngradeTest",
- func(c pkgupgrade.Context) {
- runSourceSmokeTest(glob, c.T)
- })
-}
diff --git a/test/upgrade/postupgrade.go b/test/upgrade/postupgrade.go
index 1fefbd07be..1a2ec29a75 100644
--- a/test/upgrade/postupgrade.go
+++ b/test/upgrade/postupgrade.go
@@ -29,68 +29,14 @@ import (
testlib "knative.dev/eventing/test/lib"
"knative.dev/pkg/system"
pkgupgrade "knative.dev/pkg/test/upgrade"
- "knative.dev/reconciler-test/pkg/environment"
-
- eventing "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1"
- "knative.dev/eventing-kafka-broker/control-plane/pkg/kafka"
- "knative.dev/eventing-kafka-broker/test/e2e_sink"
)
-// BrokerPostUpgradeTest tests channel operations after upgrade.
-func BrokerPostUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("BrokerPostUpgradeTest", func(c pkgupgrade.Context) {
- c.T.Parallel()
- c.T.Run("Verify post-install", func(t *testing.T) {
- verifyPostInstall(t)
- })
- c.T.Run("tests", func(t *testing.T) {
- runBrokerSmokeTest(t, kafka.BrokerClass)
- })
- })
-}
-
-// NamespacedBrokerPostUpgradeTest tests channel operations after upgrade.
-func NamespacedBrokerPostUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("NamespacedBrokerPostUpgradeTest", func(c pkgupgrade.Context) {
- c.T.Parallel()
- c.T.Run("Verify post-install", func(t *testing.T) {
- verifyPostInstall(t)
- })
- c.T.Run("tests", func(t *testing.T) {
- runBrokerSmokeTest(t, kafka.NamespacedBrokerClass)
- })
+func VerifyPostInstallTest() pkgupgrade.Operation {
+ return pkgupgrade.NewOperation("VerifyPostInstallTest", func(c pkgupgrade.Context) {
+ verifyPostInstall(c.T)
})
}
-// ChannelPostUpgradeTest tests channel operations after upgrade.
-func ChannelPostUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("ChannelPostUpgradeTest",
- func(c pkgupgrade.Context) {
- runChannelSmokeTest(c.T)
- })
-}
-
-// SinkPostUpgradeTest tests sink basic operations post upgrade.
-func SinkPostUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("SinkPostUpgradeTest", func(c pkgupgrade.Context) {
- c.T.Parallel()
- c.T.Run("Verify post-install", func(t *testing.T) {
- verifyPostInstall(t)
- })
- c.T.Run("tests", func(t *testing.T) {
- e2e_sink.RunTestKafkaSink(t, eventing.ModeBinary, nil)
- })
- })
-}
-
-// SourcePostUpgradeTest tests source operations after upgrade.
-func SourcePostUpgradeTest(glob environment.GlobalEnvironment) pkgupgrade.Operation {
- return pkgupgrade.NewOperation("SourcePostUpgradeTest",
- func(c pkgupgrade.Context) {
- runSourceSmokeTest(glob, c.T)
- })
-}
-
func verifyPostInstall(t *testing.T) {
t.Parallel()
diff --git a/test/upgrade/preupgrade.go b/test/upgrade/preupgrade.go
deleted file mode 100644
index 598678bf35..0000000000
--- a/test/upgrade/preupgrade.go
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2021 The Knative Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package upgrade
-
-import (
- eventing "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1"
- "knative.dev/eventing-kafka-broker/control-plane/pkg/kafka"
- "knative.dev/eventing-kafka-broker/test/e2e_sink"
- pkgupgrade "knative.dev/pkg/test/upgrade"
- "knative.dev/reconciler-test/pkg/environment"
-)
-
-// BrokerPreUpgradeTest tests broker basic operations before upgrade.
-func BrokerPreUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("BrokerPreUpgradeTest", func(c pkgupgrade.Context) {
- runBrokerSmokeTest(c.T, kafka.BrokerClass)
- })
-}
-
-// NamespacedBrokerPreUpgradeTest tests broker basic operations before upgrade.
-func NamespacedBrokerPreUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("NamespacedBrokerPreUpgradeTest", func(c pkgupgrade.Context) {
- runBrokerSmokeTest(c.T, kafka.NamespacedBrokerClass)
- })
-}
-
-// ChannelPreUpgradeTest tests channel basic operations before upgrade.
-func ChannelPreUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("ChannelPreUpgradeTest",
- func(c pkgupgrade.Context) {
- runChannelSmokeTest(c.T)
- })
-}
-
-// SinkPreUpgradeTest tests sink basic operations pre upgrade.
-func SinkPreUpgradeTest() pkgupgrade.Operation {
- return pkgupgrade.NewOperation("SinkPreUpgradeTest", func(c pkgupgrade.Context) {
- e2e_sink.RunTestKafkaSink(c.T, eventing.ModeBinary, nil)
- })
-}
-
-// SourcePreUpgradeTest tests source operations before upgrade.
-func SourcePreUpgradeTest(glob environment.GlobalEnvironment) pkgupgrade.Operation {
- return pkgupgrade.NewOperation("SourcePreUpgradeTest",
- func(c pkgupgrade.Context) {
- runSourceSmokeTest(glob, c.T)
- })
-}
diff --git a/test/upgrade/smoke.go b/test/upgrade/smoke.go
index 5521d13cf2..42d459ca61 100644
--- a/test/upgrade/smoke.go
+++ b/test/upgrade/smoke.go
@@ -18,77 +18,149 @@ package upgrade
import (
"context"
- "testing"
+ "sync"
- cloudevents "github.com/cloudevents/sdk-go/v2"
- pkgtesting "knative.dev/eventing-kafka-broker/test/pkg"
- testbroker "knative.dev/eventing-kafka-broker/test/pkg/broker"
+ "github.com/google/uuid"
+ "knative.dev/eventing-kafka-broker/control-plane/pkg/kafka"
"knative.dev/eventing-kafka-broker/test/rekt/features"
- eventinghelpers "knative.dev/eventing/test/e2e/helpers"
- testlib "knative.dev/eventing/test/lib"
+ brokerfeatures "knative.dev/eventing/test/rekt/features/broker"
+ channelfeatures "knative.dev/eventing/test/rekt/features/channel"
+ "knative.dev/eventing/test/rekt/features/knconf"
+ brokerresources "knative.dev/eventing/test/rekt/resources/broker"
+ "knative.dev/eventing/test/rekt/resources/channel_impl"
+ "knative.dev/eventing/test/rekt/resources/subscription"
+ eventingupgrade "knative.dev/eventing/test/upgrade"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/system"
"knative.dev/reconciler-test/pkg/environment"
+ "knative.dev/reconciler-test/pkg/eventshub"
+ "knative.dev/reconciler-test/pkg/feature"
"knative.dev/reconciler-test/pkg/k8s"
"knative.dev/reconciler-test/pkg/knative"
+ "knative.dev/reconciler-test/pkg/manifest"
)
var (
- channelTestRunner testlib.ComponentsTestRunner
+ brokerConfigMux = &sync.Mutex{}
+ channelConfigMux = &sync.Mutex{}
+ opts = []environment.EnvOpts{
+ knative.WithKnativeNamespace(system.Namespace()),
+ knative.WithLoggingConfig,
+ knative.WithTracingConfig,
+ k8s.WithEventListener,
+ }
)
-func runBrokerSmokeTest(t *testing.T, class string) {
- pkgtesting.RunMultiple(t, func(t *testing.T) {
- eventinghelpers.EventTransformationForTriggerTestHelper(
- context.Background(),
- t,
- /* broker version */ "v1",
- /* trigger version */ "v1",
- testbroker.CreatorForClass(class),
- )
- })
+func KafkaChannelFeature(glob environment.GlobalEnvironment) *eventingupgrade.DurableFeature {
+ // Prevent race conditions on channel_impl.EnvCfg.ChannelGK when running tests in parallel.
+ channelConfigMux.Lock()
+ defer channelConfigMux.Unlock()
+ channel_impl.EnvCfg.ChannelGK = "KafkaChannel.messaging.knative.dev"
+ channel_impl.EnvCfg.ChannelV = "v1beta1"
+
+ createSubscriberFn := func(ref *duckv1.KReference, uri string) manifest.CfgFn {
+ return subscription.WithSubscriber(ref, uri, "")
+ }
+
+ setupF := feature.NewFeature()
+ sink, ch := channelfeatures.ChannelChainSetup(setupF, 1, createSubscriberFn)
+
+ verifyF := func() *feature.Feature {
+ f := feature.NewFeatureNamed(setupF.Name)
+ channelfeatures.ChannelChainAssert(f, sink, ch)
+ return f
+ }
+
+ return &eventingupgrade.DurableFeature{SetupF: setupF, VerifyF: verifyF, Global: glob, EnvOpts: opts}
}
-func runChannelSmokeTest(t *testing.T) {
- cases := smokeTestCases()
- ctx := context.Background()
- for i := range cases {
- tt := cases[i]
- t.Run(tt.name, func(t *testing.T) {
- eventinghelpers.SingleEventForChannelTestHelper(
- ctx, t, tt.encoding, tt.version,
- "", channelTestRunner,
- )
- })
+func KafkaSinkSourceBinaryEventFeature(glob environment.GlobalEnvironment,
+) *eventingupgrade.DurableFeature {
+ setupF := feature.NewFeature()
+ kafkaSink, receiver := features.KafkaSourceBinaryEventFeatureSetup(setupF)
+
+ verifyF := func() *feature.Feature {
+ f := feature.NewFeatureNamed(setupF.Name)
+ features.KafkaSourceFeatureAssert(f, kafkaSink, receiver, features.KafkaSourceBinaryEventCustomizeFunc())
+ return f
}
+
+ return &eventingupgrade.DurableFeature{SetupF: setupF, VerifyF: verifyF, Global: glob, EnvOpts: opts}
}
-func runSourceSmokeTest(glob environment.GlobalEnvironment, t *testing.T) {
- ctx, env := glob.Environment(
- knative.WithKnativeNamespace(system.Namespace()),
- knative.WithLoggingConfig,
- knative.WithTracingConfig,
- k8s.WithEventListener,
- environment.Managed(t),
- )
+func KafkaSinkSourceStructuredEventFeature(glob environment.GlobalEnvironment,
+) *eventingupgrade.DurableFeature {
+ setupF := feature.NewFeature()
+ kafkaSink, receiver := features.KafkaSourceStructuredEventFeatureSetup(setupF)
+ verifyF := func() *feature.Feature {
+ f := feature.NewFeatureNamed(setupF.Name)
+ features.KafkaSourceFeatureAssert(f, kafkaSink, receiver, features.KafkaSourceStructuredEventCustomizeFunc())
+ return f
+ }
- env.Test(ctx, t, features.KafkaSourceStructuredEvent())
- env.Test(ctx, t, features.KafkaSourceBinaryEvent())
+ return &eventingupgrade.DurableFeature{SetupF: setupF, VerifyF: verifyF, Global: glob, EnvOpts: opts}
}
-type smokeTestCase struct {
- name string
- encoding cloudevents.Encoding
- version eventinghelpers.SubscriptionVersion
+func BrokerEventTransformationForTrigger(glob environment.GlobalEnvironment,
+) *eventingupgrade.DurableFeature {
+ // Prevent race conditions on EnvCfg.BrokerClass when running tests in parallel.
+ brokerConfigMux.Lock()
+ defer brokerConfigMux.Unlock()
+ brokerresources.EnvCfg.BrokerClass = kafka.BrokerClass
+
+ setupF := feature.NewFeature()
+ cfg := brokerfeatures.BrokerEventTransformationForTriggerSetup(setupF)
+
+ verifyF := func() *feature.Feature {
+ f := feature.NewFeatureNamed(setupF.Name)
+ brokerfeatures.BrokerEventTransformationForTriggerAssert(f, cfg)
+ return f
+ }
+
+ return &eventingupgrade.DurableFeature{SetupF: setupF, VerifyF: verifyF, Global: glob, EnvOpts: opts}
}
-func smokeTestCases() []smokeTestCase {
- return []smokeTestCase{{
- name: "BinaryV1",
- encoding: cloudevents.EncodingBinary,
- version: eventinghelpers.SubscriptionV1,
- }, {
- name: "StructuredV1",
- encoding: cloudevents.EncodingStructured,
- version: eventinghelpers.SubscriptionV1,
- }}
+func NamespacedBrokerEventTransformationForTrigger(glob environment.GlobalEnvironment,
+) *eventingupgrade.DurableFeature {
+ // Prevent race conditions on EnvCfg.BrokerClass when running tests in parallel.
+ brokerConfigMux.Lock()
+ defer brokerConfigMux.Unlock()
+ brokerresources.EnvCfg.BrokerClass = kafka.NamespacedBrokerClass
+
+ broker := "broker"
+ setupF := features.SetupNamespacedBroker(broker)
+ // Override name to match the enclosing function name.
+ setupF.Name = feature.NewFeature().Name
+
+ verifyF := func() *feature.Feature {
+ f := feature.NewFeatureNamed(setupF.Name)
+ brokerAcceptsBinaryContentModeAssert(f, broker)
+ return f
+ }
+
+ return &eventingupgrade.DurableFeature{SetupF: setupF, VerifyF: verifyF, Global: glob, EnvOpts: opts}
+}
+
+func brokerAcceptsBinaryContentModeAssert(f *feature.Feature, brokerName string) {
+ f.Assert("broker accepts binary content mode", func(ctx context.Context, t feature.T) {
+ source := feature.MakeRandomK8sName("source")
+ eventshub.Install(source,
+ eventshub.StartSenderToResource(brokerresources.GVR(), brokerName),
+ eventshub.InputHeader("ce-specversion", "1.0"),
+ eventshub.InputHeader("ce-type", "sometype"),
+ eventshub.InputHeader("ce-source", "200.request.sender.test.knative.dev"),
+ eventshub.InputHeader("ce-id", uuid.New().String()),
+ eventshub.InputHeader("content-type", "application/json"),
+ eventshub.InputBody("{}"),
+ eventshub.InputMethod("POST"),
+ )(ctx, t)
+
+ store := eventshub.StoreFromContext(ctx, source)
+ events := knconf.Correlate(store.AssertAtLeast(ctx, t, 2, knconf.SentEventMatcher("")))
+ for _, e := range events {
+ if e.Response.StatusCode < 200 || e.Response.StatusCode > 299 {
+ t.Errorf("Expected statuscode 2XX for sequence %d got %d", e.Response.Sequence, e.Response.StatusCode)
+ }
+ }
+ })
}
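
Note: the binary content-mode assert above could be mirrored for structured content mode by moving the CloudEvent attributes into a JSON body. A hedged sketch, assuming the standard library fmt import and keeping every eventshub/knconf call identical to the code above; the helper name and payload are illustrative only:

func brokerAcceptsStructuredContentModeAssert(f *feature.Feature, brokerName string) {
	f.Assert("broker accepts structured content mode", func(ctx context.Context, t feature.T) {
		source := feature.MakeRandomK8sName("source")
		// In structured content mode the CloudEvent attributes travel in the JSON body.
		body := fmt.Sprintf(`{"specversion":"1.0","type":"sometype","source":"200.request.sender.test.knative.dev","id":%q}`, uuid.New().String())
		eventshub.Install(source,
			eventshub.StartSenderToResource(brokerresources.GVR(), brokerName),
			eventshub.InputHeader("content-type", "application/cloudevents+json"),
			eventshub.InputBody(body),
			eventshub.InputMethod("POST"),
		)(ctx, t)

		store := eventshub.StoreFromContext(ctx, source)
		events := knconf.Correlate(store.AssertAtLeast(ctx, t, 2, knconf.SentEventMatcher("")))
		for _, e := range events {
			if e.Response.StatusCode < 200 || e.Response.StatusCode > 299 {
				t.Errorf("Expected statuscode 2XX for sequence %d got %d", e.Response.Sequence, e.Response.StatusCode)
			}
		}
	})
}
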
diff --git a/test/upgrade/suite.go b/test/upgrade/suite.go
index 335ef69b62..61271e5924 100644
--- a/test/upgrade/suite.go
+++ b/test/upgrade/suite.go
@@ -17,6 +17,9 @@
package upgrade
import (
+ "slices"
+
+ "knative.dev/eventing/test/upgrade"
pkgupgrade "knative.dev/pkg/test/upgrade"
"knative.dev/reconciler-test/pkg/environment"
@@ -25,30 +28,43 @@ import (
// Suite defines the whole upgrade test suite for Eventing Kafka.
func Suite(glob environment.GlobalEnvironment) pkgupgrade.Suite {
+ g := upgrade.FeatureGroupWithUpgradeTests{
+ // Features that will run the same test post-upgrade and post-downgrade.
+ upgrade.NewFeatureSmoke(KafkaSinkSourceBinaryEventFeature(glob)),
+ upgrade.NewFeatureSmoke(KafkaSinkSourceStructuredEventFeature(glob)),
+ upgrade.NewFeatureSmoke(BrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureSmoke(NamespacedBrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureSmoke(KafkaChannelFeature(glob)),
+ // Features that will be created pre-upgrade and verified/removed post-upgrade.
+ upgrade.NewFeatureOnlyUpgrade(KafkaSinkSourceBinaryEventFeature(glob)),
+ upgrade.NewFeatureOnlyUpgrade(KafkaSinkSourceStructuredEventFeature(glob)),
+ upgrade.NewFeatureOnlyUpgrade(BrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureOnlyUpgrade(NamespacedBrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureOnlyUpgrade(KafkaChannelFeature(glob)),
+ // Features that will be created pre-upgrade, verified post-upgrade, verified and removed post-downgrade.
+ upgrade.NewFeatureUpgradeDowngrade(KafkaSinkSourceBinaryEventFeature(glob)),
+ upgrade.NewFeatureUpgradeDowngrade(KafkaSinkSourceStructuredEventFeature(glob)),
+ upgrade.NewFeatureUpgradeDowngrade(BrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureUpgradeDowngrade(NamespacedBrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureUpgradeDowngrade(KafkaChannelFeature(glob)),
+ // Features that will be created post-upgrade, verified and removed post-downgrade.
+ upgrade.NewFeatureOnlyDowngrade(KafkaSinkSourceBinaryEventFeature(glob)),
+ upgrade.NewFeatureOnlyDowngrade(KafkaSinkSourceStructuredEventFeature(glob)),
+ upgrade.NewFeatureOnlyDowngrade(BrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureOnlyDowngrade(NamespacedBrokerEventTransformationForTrigger(glob)),
+ upgrade.NewFeatureOnlyDowngrade(KafkaChannelFeature(glob)),
+ }
return pkgupgrade.Suite{
Tests: pkgupgrade.Tests{
- PreUpgrade: []pkgupgrade.Operation{
- BrokerPreUpgradeTest(),
- NamespacedBrokerPreUpgradeTest(),
- ChannelPreUpgradeTest(),
- SinkPreUpgradeTest(),
- SourcePreUpgradeTest(glob),
- },
- PostUpgrade: []pkgupgrade.Operation{
- BrokerPostUpgradeTest(),
- NamespacedBrokerPostUpgradeTest(),
- ChannelPostUpgradeTest(),
- SinkPostUpgradeTest(),
- SourcePostUpgradeTest(glob),
- },
- PostDowngrade: []pkgupgrade.Operation{
- BrokerPostDowngradeTest(),
- NamespacedBrokerPostDowngradeTest(),
- ChannelPostDowngradeTest(),
- SinkPostDowngradeTest(),
- SourcePostDowngradeTest(glob),
- },
- Continual: ContinualTests(),
+ PreUpgrade: g.PreUpgradeTests(),
+ PostUpgrade: slices.Concat(
+ []pkgupgrade.Operation{
+ VerifyPostInstallTest(),
+ },
+ g.PostUpgradeTests(),
+ ),
+ PostDowngrade: g.PostDowngradeTests(),
+ Continual: ContinualTests(),
},
Installations: pkgupgrade.Installations{
Base: []pkgupgrade.Operation{
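
Note: the rewritten Suite is consumed the same way as before: a Go test builds it from a GlobalEnvironment and lets knative.dev/pkg/test/upgrade drive the pre-upgrade, post-upgrade, and post-downgrade phases. A rough sketch, assuming a package-level `global environment.GlobalEnvironment` initialized in TestMain; the test name is illustrative, not part of this change:

func TestUpgradeExample(t *testing.T) {
	// Build the suite defined above and let the upgrade framework execute
	// its phases, including the installations it configures.
	s := Suite(global)
	s.Execute(pkgupgrade.Configuration{T: t})
}
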
diff --git a/third_party/eventing-latest/eventing-core.yaml b/third_party/eventing-latest/eventing-core.yaml
index 795164cdbc..5c5dd4a4d1 100644
--- a/third_party/eventing-latest/eventing-core.yaml
+++ b/third_party/eventing-latest/eventing-core.yaml
@@ -16,7 +16,7 @@ kind: Namespace
metadata:
name: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
---
@@ -40,7 +40,7 @@ metadata:
name: eventing-controller
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -48,7 +48,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-controller
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -64,7 +64,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-controller-resolver
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -80,7 +80,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-controller-source-observer
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -96,7 +96,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-controller-sources-controller
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -112,7 +112,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-controller-manipulator
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -128,7 +128,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-controller-crossnamespace-subscriber
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -160,7 +160,7 @@ metadata:
name: job-sink
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -168,7 +168,7 @@ kind: ClusterRoleBinding
metadata:
name: knative-eventing-job-sink
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -200,7 +200,7 @@ metadata:
name: pingsource-mt-adapter
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -208,7 +208,7 @@ kind: ClusterRoleBinding
metadata:
name: knative-eventing-pingsource-mt-adapter
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -240,7 +240,7 @@ metadata:
name: eventing-webhook
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -248,7 +248,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-webhook
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -265,7 +265,7 @@ metadata:
namespace: knative-eventing
name: eventing-webhook
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -281,7 +281,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-webhook-resolver
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -297,7 +297,7 @@ kind: ClusterRoleBinding
metadata:
name: eventing-webhook-podspecable-binding
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
subjects:
- kind: ServiceAccount
@@ -329,7 +329,7 @@ metadata:
name: config-br-default-channel
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
data:
channel-template-spec: |
@@ -357,7 +357,7 @@ metadata:
name: config-br-defaults
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
data:
# Configures the default for any Broker that does not specify a spec.config or Broker class.
@@ -394,7 +394,7 @@ metadata:
name: default-ch-webhook
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
data:
# Configuration for defaulting channels that do not specify CRD implementations.
@@ -429,7 +429,7 @@ metadata:
namespace: knative-eventing
annotations:
knative.dev/example-checksum: "9185c153"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
data:
_example: |
@@ -475,7 +475,7 @@ metadata:
labels:
knative.dev/config-propagation: original
knative.dev/config-category: eventing
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
data:
# ALPHA feature: The kreference-group allows you to use the Group field in KReferences.
@@ -580,7 +580,7 @@ metadata:
name: config-leader-election
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
knative.dev/example-checksum: "f7948630"
@@ -643,7 +643,7 @@ metadata:
labels:
knative.dev/config-propagation: original
knative.dev/config-category: eventing
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
data:
# Common configuration for all Knative codebase
@@ -696,7 +696,7 @@ metadata:
labels:
knative.dev/config-propagation: original
knative.dev/config-category: eventing
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
knative.dev/example-checksum: "f46cf09d"
@@ -770,7 +770,7 @@ metadata:
name: config-sugar
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
knative.dev/example-checksum: "62dfac6f"
@@ -829,7 +829,7 @@ metadata:
labels:
knative.dev/config-propagation: original
knative.dev/config-category: eventing
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
knative.dev/example-checksum: "0492ceb0"
@@ -886,7 +886,7 @@ metadata:
labels:
knative.dev/high-availability: "true"
app.kubernetes.io/component: eventing-controller
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
bindings.knative.dev/exclude: "true"
spec:
@@ -898,7 +898,7 @@ spec:
labels:
app: eventing-controller
app.kubernetes.io/component: eventing-controller
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
# To avoid node becoming SPOF, spread our replicas to different nodes.
@@ -916,7 +916,7 @@ spec:
containers:
- name: eventing-controller
terminationMessagePolicy: FallbackToLogsOnError
- image: gcr.io/knative-nightly/knative.dev/eventing/cmd/controller@sha256:c84d3ac5f432417c249d3916e2fca248d421e1020f9040f9b165950df27a1b8c
+ image: gcr.io/knative-nightly/knative.dev/eventing/cmd/controller@sha256:06d48f23cb21191caa95d4485884485a31c65ee13e1a57543b4289509e87724d
resources:
requests:
cpu: 100m
@@ -934,7 +934,7 @@ spec:
value: knative.dev/eventing
# APIServerSource
- name: APISERVER_RA_IMAGE
- value: gcr.io/knative-nightly/knative.dev/eventing/cmd/apiserver_receive_adapter@sha256:d848d766be1d8c1f94dc1eed01c02e74293309b77003f6202136636cabdccee0
+ value: gcr.io/knative-nightly/knative.dev/eventing/cmd/apiserver_receive_adapter@sha256:61576118c87ecb3e5b3fa0e1adf917bc0d6fb50d38efc94478c00ce8158db103
- name: POD_NAME
valueFrom:
fieldRef:
@@ -1004,7 +1004,7 @@ metadata:
namespace: knative-eventing
labels:
app.kubernetes.io/component: job-sink
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
replicas: 1
@@ -1016,7 +1016,7 @@ spec:
labels:
sinks.knative.dev/sink: job-sink
app.kubernetes.io/component: job-sink
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
affinity:
@@ -1032,7 +1032,7 @@ spec:
containers:
- name: job-sink
terminationMessagePolicy: FallbackToLogsOnError
- image: gcr.io/knative-nightly/knative.dev/eventing/cmd/jobsink@sha256:7e4e5ca83691ae173c589465dc695b90c9388e21f6f3cad38de2e199f2d7ece7
+ image: gcr.io/knative-nightly/knative.dev/eventing/cmd/jobsink@sha256:0717331839c43f6d36d5940d47fc59b880df4d7b12914f85f50aa5ade2baec1d
env:
- name: SYSTEM_NAMESPACE
valueFrom:
@@ -1114,7 +1114,7 @@ metadata:
labels:
sinks.knative.dev/sink: job-sink
app.kubernetes.io/component: job-sink
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
name: job-sink
namespace: knative-eventing
@@ -1157,7 +1157,7 @@ metadata:
namespace: knative-eventing
labels:
app.kubernetes.io/component: pingsource-mt-adapter
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
bindings.knative.dev/exclude: "true"
spec:
@@ -1173,7 +1173,7 @@ spec:
eventing.knative.dev/source: ping-source-controller
sources.knative.dev/role: adapter
app.kubernetes.io/component: pingsource-mt-adapter
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
affinity:
@@ -1189,7 +1189,7 @@ spec:
enableServiceLinks: false
containers:
- name: dispatcher
- image: gcr.io/knative-nightly/knative.dev/eventing/cmd/mtping@sha256:1d49cc9f7e48d69c78dd91075628c0c3d20cef1147d4ea582df6dab72027f1f1
+ image: gcr.io/knative-nightly/knative.dev/eventing/cmd/mtping@sha256:aba83d0c8b94be6c8968c49adf3cf3db5a58901271735ae9e60ed39cdef8209c
env:
- name: SYSTEM_NAMESPACE
value: ''
@@ -1263,7 +1263,7 @@ metadata:
namespace: knative-eventing
labels:
app.kubernetes.io/component: eventing-webhook
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
scaleTargetRef:
@@ -1288,7 +1288,7 @@ metadata:
namespace: knative-eventing
labels:
app.kubernetes.io/component: eventing-webhook
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
minAvailable: 80%
@@ -1318,7 +1318,7 @@ metadata:
namespace: knative-eventing
labels:
app.kubernetes.io/component: eventing-webhook
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
bindings.knative.dev/exclude: "true"
spec:
@@ -1332,7 +1332,7 @@ spec:
app: eventing-webhook
role: eventing-webhook
app.kubernetes.io/component: eventing-webhook
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
# To avoid node becoming SPOF, spread our replicas to different nodes.
@@ -1352,7 +1352,7 @@ spec:
terminationMessagePolicy: FallbackToLogsOnError
# This is the Go import path for the binary that is containerized
# and substituted here.
- image: gcr.io/knative-nightly/knative.dev/eventing/cmd/webhook@sha256:5c88558c6b4e54687fb62af9bc69905ebd101375d29e0f02b18cb0b9608af166
+ image: gcr.io/knative-nightly/knative.dev/eventing/cmd/webhook@sha256:a62f372749b6328a1678fefef2fb98d374805ded49598c7a6ef398d27c12b06b
resources:
requests:
# taken from serving.
@@ -1431,7 +1431,7 @@ metadata:
labels:
role: eventing-webhook
app.kubernetes.io/component: eventing-webhook
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
name: eventing-webhook
namespace: knative-eventing
@@ -1466,7 +1466,7 @@ metadata:
eventing.knative.dev/source: "true"
duck.knative.dev/source: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
# TODO add schemas
@@ -1753,7 +1753,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -1968,7 +1968,7 @@ metadata:
knative.dev/crd-install: "true"
messaging.knative.dev/subscribable: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: messaging.knative.dev
@@ -2326,7 +2326,7 @@ metadata:
eventing.knative.dev/source: "true"
duck.knative.dev/source: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
name: containersources.sources.knative.dev
spec:
@@ -2501,7 +2501,7 @@ metadata:
name: eventpolicies.eventing.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -2720,7 +2720,7 @@ metadata:
name: eventtypes.eventing.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -3113,7 +3113,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: sinks.knative.dev
@@ -3215,6 +3215,10 @@ spec:
type:
description: 'Type of condition.'
type: string
+ observedGeneration:
+ description: ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.
+ type: integer
+ format: int64
additionalPrinterColumns:
- name: URL
type: string
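
The new observedGeneration property on each condition lets consumers check whether the reported status reflects the latest spec. A minimal illustrative status fragment (resource and condition values are made up, not taken from this change) would look like:

status:
  conditions:
    - type: Ready
      status: "True"
      observedGeneration: 3   # matches metadata.generation once the controller has processed the latest spec
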
@@ -3260,7 +3264,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: flows.knative.dev
@@ -3788,7 +3792,7 @@ metadata:
eventing.knative.dev/source: "true"
duck.knative.dev/source: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
# TODO add schema
@@ -4160,7 +4164,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: flows.knative.dev
@@ -4544,7 +4548,7 @@ metadata:
duck.knative.dev/source: "true"
duck.knative.dev/binding: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
name: sinkbindings.sources.knative.dev
spec:
@@ -4759,7 +4763,7 @@ metadata:
name: subscriptions.messaging.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: messaging.knative.dev
@@ -5021,7 +5025,7 @@ metadata:
name: triggers.eventing.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -5120,6 +5124,9 @@ spec:
description: Retry is the minimum number of retries the sender should attempt when sending an event before moving it to the dead letter sink.
type: integer
format: int32
+ format:
+ description: Format is the format used to serialize the event into an HTTP request when delivering the event. It can be json (for structured events), binary (for binary events), or unset.
+ type: string
filter:
description: 'Filter is the filter to apply against all events from the Broker. Only events that pass this filter will be sent to the Subscriber. If not specified, will default to allowing all events.'
type: object
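
The added format field gives Trigger authors control over how CloudEvents are serialized toward the subscriber. A hedged illustrative manifest, assuming the field sits under spec.delivery alongside the other delivery properties shown above (broker and subscriber names are placeholders):

apiVersion: eventing.knative.dev/v1
kind: Trigger
metadata:
  name: example-trigger
spec:
  broker: default
  delivery:
    format: json        # structured-mode delivery; "binary" or unset keeps the default behavior
  subscriber:
    ref:
      apiVersion: serving.knative.dev/v1
      kind: Service
      name: event-display
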
@@ -5294,7 +5301,7 @@ kind: ClusterRole
metadata:
name: addressable-resolver
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
aggregationRule:
clusterRoleSelectors:
@@ -5308,7 +5315,7 @@ metadata:
name: service-addressable-resolver
labels:
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "addressable-resolver" role.
rules:
@@ -5327,7 +5334,7 @@ metadata:
name: serving-addressable-resolver
labels:
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "addressable-resolver" role.
rules:
@@ -5349,7 +5356,7 @@ metadata:
name: channel-addressable-resolver
labels:
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "addressable-resolver" role.
rules:
@@ -5375,7 +5382,7 @@ metadata:
name: broker-addressable-resolver
labels:
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "addressable-resolver" role.
rules:
@@ -5395,7 +5402,7 @@ metadata:
name: flows-addressable-resolver
labels:
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "addressable-resolver" role.
rules:
@@ -5431,7 +5438,7 @@ kind: ClusterRole
metadata:
name: eventing-broker-filter
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -5457,7 +5464,7 @@ kind: ClusterRole
metadata:
name: eventing-broker-ingress
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -5474,7 +5481,7 @@ kind: ClusterRole
metadata:
name: eventing-config-reader
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -5507,7 +5514,7 @@ kind: ClusterRole
metadata:
name: channelable-manipulator
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
aggregationRule:
clusterRoleSelectors:
@@ -5521,7 +5528,7 @@ metadata:
name: meta-channelable-manipulator
labels:
duck.knative.dev/channelable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "channelable-manipulator" role.
rules:
@@ -5560,7 +5567,7 @@ metadata:
name: knative-eventing-namespaced-admin
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups: ["eventing.knative.dev"]
@@ -5573,7 +5580,7 @@ metadata:
name: knative-messaging-namespaced-admin
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups: ["messaging.knative.dev"]
@@ -5586,7 +5593,7 @@ metadata:
name: knative-flows-namespaced-admin
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups: ["flows.knative.dev"]
@@ -5599,7 +5606,7 @@ metadata:
name: knative-sources-namespaced-admin
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups: ["sources.knative.dev"]
@@ -5612,7 +5619,7 @@ metadata:
name: knative-bindings-namespaced-admin
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups: ["bindings.knative.dev"]
@@ -5625,7 +5632,7 @@ metadata:
name: knative-eventing-namespaced-edit
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups: ["eventing.knative.dev", "messaging.knative.dev", "sources.knative.dev", "flows.knative.dev", "bindings.knative.dev"]
@@ -5638,7 +5645,7 @@ metadata:
name: knative-eventing-namespaced-view
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups: ["eventing.knative.dev", "messaging.knative.dev", "sources.knative.dev", "flows.knative.dev", "bindings.knative.dev"]
@@ -5665,7 +5672,7 @@ kind: ClusterRole
metadata:
name: knative-eventing-controller
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -5874,7 +5881,7 @@ kind: ClusterRole
metadata:
name: crossnamespace-subscriber
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
aggregationRule:
clusterRoleSelectors:
@@ -5888,7 +5895,7 @@ metadata:
name: channel-subscriber
labels:
duck.knative.dev/crossnamespace-subscribable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -5904,7 +5911,7 @@ metadata:
name: broker-subscriber
labels:
duck.knative.dev/crossnamespace-subscribable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -5934,7 +5941,7 @@ kind: ClusterRole
metadata:
name: knative-eventing-job-sink
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -6028,7 +6035,7 @@ kind: ClusterRole
metadata:
name: knative-eventing-pingsource-mt-adapter
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -6101,7 +6108,7 @@ kind: ClusterRole
metadata:
name: podspecable-binding
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
aggregationRule:
clusterRoleSelectors:
@@ -6115,7 +6122,7 @@ metadata:
name: builtin-podspecable-binding
labels:
duck.knative.dev/podspecable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "podspecable-binding" role.
rules:
@@ -6161,7 +6168,7 @@ kind: ClusterRole
metadata:
name: source-observer
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
aggregationRule:
clusterRoleSelectors:
@@ -6175,7 +6182,7 @@ metadata:
name: eventing-sources-source-observer
labels:
duck.knative.dev/source: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# Do not use this role directly. These rules will be added to the "source-observer" role.
rules:
@@ -6211,7 +6218,7 @@ kind: ClusterRole
metadata:
name: knative-eventing-sources-controller
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
- apiGroups:
@@ -6332,7 +6339,7 @@ kind: ClusterRole
metadata:
name: knative-eventing-webhook
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
# For watching logging configuration and getting certs.
@@ -6509,7 +6516,7 @@ metadata:
namespace: knative-eventing
name: knative-eventing-webhook
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
rules:
# For manipulating certs into secrets.
@@ -6545,7 +6552,7 @@ kind: ValidatingWebhookConfiguration
metadata:
name: config.webhook.eventing.knative.dev
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
webhooks:
- admissionReviewVersions: ["v1", "v1beta1"]
@@ -6583,7 +6590,7 @@ kind: MutatingWebhookConfiguration
metadata:
name: webhook.eventing.knative.dev
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
webhooks:
- admissionReviewVersions: ["v1", "v1beta1"]
@@ -6616,7 +6623,7 @@ kind: ValidatingWebhookConfiguration
metadata:
name: validation.webhook.eventing.knative.dev
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
webhooks:
- admissionReviewVersions: ["v1", "v1beta1"]
@@ -6650,7 +6657,7 @@ metadata:
name: eventing-webhook-certs
namespace: knative-eventing
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
# The data is populated at install time.
@@ -6674,7 +6681,7 @@ kind: MutatingWebhookConfiguration
metadata:
name: sinkbindings.webhook.sources.knative.dev
labels:
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
webhooks:
- admissionReviewVersions: ["v1", "v1beta1"]
diff --git a/third_party/eventing-latest/eventing-crds.yaml b/third_party/eventing-latest/eventing-crds.yaml
index 786fc4408c..21b8c499bb 100644
--- a/third_party/eventing-latest/eventing-crds.yaml
+++ b/third_party/eventing-latest/eventing-crds.yaml
@@ -20,7 +20,7 @@ metadata:
eventing.knative.dev/source: "true"
duck.knative.dev/source: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
# TODO add schemas
@@ -307,7 +307,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -522,7 +522,7 @@ metadata:
knative.dev/crd-install: "true"
messaging.knative.dev/subscribable: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: messaging.knative.dev
@@ -880,7 +880,7 @@ metadata:
eventing.knative.dev/source: "true"
duck.knative.dev/source: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
name: containersources.sources.knative.dev
spec:
@@ -1055,7 +1055,7 @@ metadata:
name: eventpolicies.eventing.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -1274,7 +1274,7 @@ metadata:
name: eventtypes.eventing.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -1667,7 +1667,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: sinks.knative.dev
@@ -1769,6 +1769,10 @@ spec:
type:
description: 'Type of condition.'
type: string
+ observedGeneration:
+ description: ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.
+ type: integer
+ format: int64
additionalPrinterColumns:
- name: URL
type: string
@@ -1814,7 +1818,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: flows.knative.dev
@@ -2342,7 +2346,7 @@ metadata:
eventing.knative.dev/source: "true"
duck.knative.dev/source: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
annotations:
# TODO add schema
@@ -2714,7 +2718,7 @@ metadata:
labels:
knative.dev/crd-install: "true"
duck.knative.dev/addressable: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: flows.knative.dev
@@ -3098,7 +3102,7 @@ metadata:
duck.knative.dev/source: "true"
duck.knative.dev/binding: "true"
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
name: sinkbindings.sources.knative.dev
spec:
@@ -3313,7 +3317,7 @@ metadata:
name: subscriptions.messaging.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: messaging.knative.dev
@@ -3575,7 +3579,7 @@ metadata:
name: triggers.eventing.knative.dev
labels:
knative.dev/crd-install: "true"
- app.kubernetes.io/version: "20240818-bf945f909"
+ app.kubernetes.io/version: "20240929-e7fca7646"
app.kubernetes.io/name: knative-eventing
spec:
group: eventing.knative.dev
@@ -3674,6 +3678,9 @@ spec:
description: Retry is the minimum number of retries the sender should attempt when sending an event before moving it to the dead letter sink.
type: integer
format: int32
+ format:
+ description: Format is the format used to serialize the event into an HTTP request when delivering the event. It can be json (for structured events), binary (for binary events), or unset.
+ type: string
filter:
description: 'Filter is the filter to apply against all events from the Broker. Only events that pass this filter will be sent to the Subscriber. If not specified, will default to allowing all events.'
type: object
diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka
index 186c2eb186..40f5f333b5 100644
--- a/vendor/github.com/IBM/sarama/Dockerfile.kafka
+++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8@sha256:b93deceb59a58588d5b16429fc47f98920f84740a1f2ed6454e33275f0701b59
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10@sha256:de2a0a20c1c3b39c3de829196de9694d09f97cd18fda1004de855ed2b4c841ba
USER root
diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go
index f629a6a2e7..a6fa3d4a2e 100644
--- a/vendor/github.com/IBM/sarama/async_producer.go
+++ b/vendor/github.com/IBM/sarama/async_producer.go
@@ -1101,7 +1101,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo
bp.parent.returnSuccesses(pSet.msgs)
// Retriable errors
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
if bp.parent.conf.Producer.Retry.Max <= 0 {
bp.parent.abandonBrokerConnection(bp.broker)
bp.parent.returnErrors(pSet.msgs, block.Err)
@@ -1134,7 +1134,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo
switch block.Err {
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
bp.broker.ID(), topic, partition, block.Err)
if bp.currentRetries[topic] == nil {
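
With ErrKafkaStorageError now in the retriable set, the producer retries through transient KAFKA_STORAGE_ERROR responses instead of failing the batch immediately, subject to the usual retry settings. A minimal sketch of the knobs that now also govern that case (broker address and topic are placeholders):

package main

import (
	"log"
	"time"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// These settings now also apply to KAFKA_STORAGE_ERROR, per the change above.
	cfg.Producer.Retry.Max = 5
	cfg.Producer.Retry.Backoff = 100 * time.Millisecond
	cfg.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg) // placeholder broker
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic", // placeholder topic
		Value: sarama.StringEncoder("hello"),
	}); err != nil {
		log.Fatal(err)
	}
}
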
diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go
index facf766436..f2f197887c 100644
--- a/vendor/github.com/IBM/sarama/config.go
+++ b/vendor/github.com/IBM/sarama/config.go
@@ -387,7 +387,7 @@ type Config struct {
// default is 250ms, since 0 causes the consumer to spin when no events are
// available. 100-500ms is a reasonable range for most cases. Kafka only
// supports precision up to milliseconds; nanoseconds will be truncated.
- // Equivalent to the JVM's `fetch.wait.max.ms`.
+ // Equivalent to the JVM's `fetch.max.wait.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message takes to
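
For reference, the corrected property name in that comment maps to this consumer setting; a minimal sketch:

package main

import (
	"fmt"
	"time"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Counterpart of the broker-side fetch.max.wait.ms named in the corrected comment:
	// how long the broker may hold a fetch open when no data is available (100-500ms is sensible).
	cfg.Consumer.MaxWaitTime = 250 * time.Millisecond
	fmt.Println(cfg.Consumer.MaxWaitTime)
}
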
diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml
index e916416d50..204768e320 100644
--- a/vendor/github.com/IBM/sarama/docker-compose.yml
+++ b/vendor/github.com/IBM/sarama/docker-compose.yml
@@ -1,8 +1,8 @@
-version: '3.9'
services:
zookeeper-1:
hostname: 'zookeeper-1'
image: 'docker.io/library/zookeeper:3.6.3'
+ init: true
restart: always
environment:
ZOO_MY_ID: '1'
@@ -15,6 +15,7 @@ services:
zookeeper-2:
hostname: 'zookeeper-2'
image: 'docker.io/library/zookeeper:3.6.3'
+ init: true
restart: always
environment:
ZOO_MY_ID: '2'
@@ -27,6 +28,7 @@ services:
zookeeper-3:
hostname: 'zookeeper-3'
image: 'docker.io/library/zookeeper:3.6.3'
+ init: true
restart: always
environment:
ZOO_MY_ID: '3'
@@ -39,6 +41,7 @@ services:
kafka-1:
hostname: 'kafka-1'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -84,6 +87,7 @@ services:
kafka-2:
hostname: 'kafka-2'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -129,6 +133,7 @@ services:
kafka-3:
hostname: 'kafka-3'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -174,6 +179,7 @@ services:
kafka-4:
hostname: 'kafka-4'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -219,6 +225,7 @@ services:
kafka-5:
hostname: 'kafka-5'
image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}'
+ init: true
build:
context: .
dockerfile: Dockerfile.kafka
@@ -264,6 +271,7 @@ services:
toxiproxy:
hostname: 'toxiproxy'
image: 'ghcr.io/shopify/toxiproxy:2.4.0'
+ init: true
healthcheck:
test: ['CMD', '/toxiproxy-cli', 'l']
interval: 15s
diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go
index 1bf5459089..2948651272 100644
--- a/vendor/github.com/IBM/sarama/offset_manager.go
+++ b/vendor/github.com/IBM/sarama/offset_manager.go
@@ -251,18 +251,31 @@ func (om *offsetManager) Commit() {
}
func (om *offsetManager) flushToBroker() {
+ broker, err := om.coordinator()
+ if err != nil {
+ om.handleError(err)
+ return
+ }
+
+ // Care needs to be taken to unlock this. Don't want to defer the unlock as this would
+ // cause the lock to be held while waiting for the broker to reply.
+ broker.lock.Lock()
req := om.constructRequest()
if req == nil {
+ broker.lock.Unlock()
return
}
+ resp, rp, err := sendOffsetCommit(broker, req)
+ broker.lock.Unlock()
- broker, err := om.coordinator()
if err != nil {
om.handleError(err)
+ om.releaseCoordinator(broker)
+ _ = broker.Close()
return
}
- resp, err := broker.CommitOffset(req)
+ err = handleResponsePromise(req, resp, rp, nil)
if err != nil {
om.handleError(err)
om.releaseCoordinator(broker)
@@ -270,9 +283,20 @@ func (om *offsetManager) flushToBroker() {
return
}
+ broker.handleThrottledResponse(resp)
om.handleResponse(broker, req, resp)
}
+func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) {
+ resp := new(OffsetCommitResponse)
+ responseHeaderVersion := resp.headerVersion()
+ promise, err := coordinator.send(req, true, responseHeaderVersion)
+ if err != nil {
+ return nil, nil, err
+ }
+ return resp, promise, nil
+}
+
func (om *offsetManager) constructRequest() *OffsetCommitRequest {
r := &OffsetCommitRequest{
Version: 1,
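
The reworked flushToBroker holds the broker lock only while the OffsetCommit request is enqueued and releases it before blocking on the response promise, so a slow reply no longer stalls other goroutines that need the same broker. A generic sketch of that locking pattern (the types and names are illustrative, not sarama's internals):

package main

import (
	"fmt"
	"sync"
)

type request struct{ payload string }
type response struct{ body string }

type broker struct {
	mu sync.Mutex
}

// send enqueues the request and returns a channel that will carry the reply.
func (b *broker) send(req request) <-chan response {
	ch := make(chan response, 1)
	go func() { ch <- response{body: "ok:" + req.payload} }() // stand-in for network I/O
	return ch
}

func commit(b *broker, req request) response {
	// Hold the lock only long enough to enqueue the request...
	b.mu.Lock()
	promise := b.send(req)
	b.mu.Unlock()
	// ...then wait for the reply without blocking other users of the broker.
	return <-promise
}

func main() {
	b := &broker{}
	fmt.Println(commit(b, request{payload: "offsets"}))
}
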
diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go
index ca7e13dab0..bf20b75e90 100644
--- a/vendor/github.com/IBM/sarama/transaction_manager.go
+++ b/vendor/github.com/IBM/sarama/transaction_manager.go
@@ -466,7 +466,7 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets,
resultOffsets = failedTxn
if len(resultOffsets) == 0 {
- DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n",
+ DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n",
t.transactionalID, groupId)
return resultOffsets, false, nil
}
diff --git a/vendor/github.com/gobuffalo/flect/humanize.go b/vendor/github.com/gobuffalo/flect/humanize.go
index 311c8beed4..5100bfb7e7 100644
--- a/vendor/github.com/gobuffalo/flect/humanize.go
+++ b/vendor/github.com/gobuffalo/flect/humanize.go
@@ -7,6 +7,7 @@ import (
// Humanize returns first letter of sentence capitalized.
// Common acronyms are capitalized as well.
// Other capital letters in string are left as provided.
+//
// employee_salary = Employee salary
// employee_id = employee ID
// employee_mobile_number = Employee mobile number
@@ -22,6 +23,10 @@ func (i Ident) Humanize() Ident {
return New("")
}
+ if strings.TrimSpace(i.Original) == "" {
+ return i
+ }
+
parts := xappend([]string{}, Titleize(i.Parts[0]))
if len(i.Parts) > 1 {
parts = xappend(parts, i.Parts[1:]...)
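
The added guard makes Humanize a no-op for whitespace-only input instead of indexing into an empty Parts slice. A small usage sketch:

package main

import (
	"fmt"

	"github.com/gobuffalo/flect"
)

func main() {
	fmt.Printf("%q\n", flect.Humanize("employee_salary")) // "Employee salary", per the doc comment above
	fmt.Printf("%q\n", flect.Humanize("   "))             // whitespace-only input is now returned unchanged
}
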
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
index 9a7655c0f7..0782b86e3d 100644
--- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
@@ -5,7 +5,6 @@
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
-// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
JB matchlen_match4_standalone
matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
- SARQ $0x03, BX
+ SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index 8d5567fe64..b7b83164bc 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
+ if len(block.sequences) == 0 {
+ continue
+ }
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
@@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if offset == 0 {
continue
}
+ if int(offset) >= len(o.History) {
+ continue
+ }
if offset > 3 {
newOffsets[offset-3]++
} else {
@@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
+ if nUsed == 0 {
+ nUsed = 1
+ }
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
@@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
fakeLength += v
hist[i] = uint32(v)
}
+
+ // Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength {
+ for i := range hist {
+ if uint8(i) == maxSym {
+ fakeLength++
+ maxSym++
+ hist[i+1] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ if hist[0] == 0 {
+ fakeLength++
+ hist[i] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ }
+ }
+
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 17901e0804..ae7d4d3295 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -162,12 +162,12 @@ finalize:
MOVD h, ret+24(FP)
RET
-// func writeBlocks(d *Digest, b []byte) int
+// func writeBlocks(s *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
- MOVD d+0(FP), digest
+ MOVD s+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
index 9a7655c0f7..0782b86e3d 100644
--- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -5,7 +5,6 @@
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
-// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
JB matchlen_match4_standalone
matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
- SARQ $0x03, BX
+ SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
index 3b974754c3..f9057fd273 100644
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
@@ -25,15 +25,18 @@ package runtime
import (
"errors"
- "math"
cg "go.uber.org/automaxprocs/internal/cgroups"
)
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
-// to a valid GOMAXPROCS value.
-func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) {
- cgroups, err := newQueryer()
+// to a valid GOMAXPROCS value. The quota is converted from float to int using round.
+// If round == nil, DefaultRoundFunc is used.
+func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) {
+ if round == nil {
+ round = DefaultRoundFunc
+ }
+ cgroups, err := _newQueryer()
if err != nil {
return -1, CPUQuotaUndefined, err
}
@@ -43,7 +46,7 @@ func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) {
return -1, CPUQuotaUndefined, err
}
- maxProcs := int(math.Floor(quota))
+ maxProcs := round(quota)
if minValue > 0 && maxProcs < minValue {
return minValue, CPUQuotaMinUsed, nil
}
@@ -57,6 +60,7 @@ type queryer interface {
var (
_newCgroups2 = cg.NewCGroups2ForCurrentProcess
_newCgroups = cg.NewCGroupsForCurrentProcess
+ _newQueryer = newQueryer
)
func newQueryer() (queryer, error) {
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
index 6922554484..e74701508e 100644
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
@@ -26,6 +26,6 @@ package runtime
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
// current OS.
-func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) {
+func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) {
return -1, CPUQuotaUndefined, nil
}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
index df6eacf053..f8a2834ac0 100644
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
@@ -20,6 +20,8 @@
package runtime
+import "math"
+
// CPUQuotaStatus presents the status of how CPU quota is used
type CPUQuotaStatus int
@@ -31,3 +33,8 @@ const (
// CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
CPUQuotaMinUsed
)
+
+// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor).
+func DefaultRoundFunc(v float64) int {
+ return int(math.Floor(v))
+}
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
index 98176d6457..e561fe60b2 100644
--- a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
@@ -37,9 +37,10 @@ func currentMaxProcs() int {
}
type config struct {
- printf func(string, ...interface{})
- procs func(int) (int, iruntime.CPUQuotaStatus, error)
- minGOMAXPROCS int
+ printf func(string, ...interface{})
+ procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error)
+ minGOMAXPROCS int
+ roundQuotaFunc func(v float64) int
}
func (c *config) log(fmt string, args ...interface{}) {
@@ -71,6 +72,13 @@ func Min(n int) Option {
})
}
+// RoundQuotaFunc sets the function that will be used to convert the CPU quota from float to int.
+func RoundQuotaFunc(rf func(v float64) int) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.roundQuotaFunc = rf
+ })
+}
+
type optionFunc func(*config)
func (of optionFunc) apply(cfg *config) { of(cfg) }
@@ -82,8 +90,9 @@ func (of optionFunc) apply(cfg *config) { of(cfg) }
// configured CPU quota.
func Set(opts ...Option) (func(), error) {
cfg := &config{
- procs: iruntime.CPUQuotaToGOMAXPROCS,
- minGOMAXPROCS: 1,
+ procs: iruntime.CPUQuotaToGOMAXPROCS,
+ roundQuotaFunc: iruntime.DefaultRoundFunc,
+ minGOMAXPROCS: 1,
}
for _, o := range opts {
o.apply(cfg)
@@ -102,7 +111,7 @@ func Set(opts ...Option) (func(), error) {
return undoNoop, nil
}
- maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS)
+ maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
if err != nil {
return undoNoop, err
}
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
index 108a95535e..cc7fc5aee1 100644
--- a/vendor/go.uber.org/automaxprocs/maxprocs/version.go
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
@@ -21,4 +21,4 @@
package maxprocs
// Version is the current package version.
-const Version = "1.5.2"
+const Version = "1.6.0"
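
The v1.6.0 bump exposes the new rounding hook: callers can round fractional CPU quotas up instead of relying on the floor default. A minimal sketch of opting into ceiling rounding:

package main

import (
	"log"
	"math"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	undo, err := maxprocs.Set(
		maxprocs.Logger(log.Printf),
		// Round a fractional quota (e.g. 2.5 CPUs) up to 3 instead of the default floor to 2.
		maxprocs.RoundQuotaFunc(func(v float64) int { return int(math.Ceil(v)) }),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer undo()
}
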
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index d07dd09eb5..e14b766a32 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -552,6 +552,7 @@ ccflags="$@"
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
+ $2 ~ /^(CONNECT|SAE)_/ ||
$2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 2d15200adb..099867deed 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) {
return pthread_fchdir_np(fd)
}
+// Connectx calls connectx(2) to initiate a connection on a socket.
+//
+// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
+//
+// - srcIf is the optional source interface index. 0 means unspecified.
+// - srcAddr is the optional source address. nil means unspecified.
+// - dstAddr is the destination address.
+//
+// On success, Connectx returns the number of bytes enqueued for transmission.
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
+ endpoints := SaEndpoints{
+ Srcif: srcIf,
+ }
+
+ if srcAddr != nil {
+ addrp, addrlen, err := srcAddr.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ endpoints.Srcaddr = (*RawSockaddr)(addrp)
+ endpoints.Srcaddrlen = uint32(addrlen)
+ }
+
+ if dstAddr != nil {
+ addrp, addrlen, err := dstAddr.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ endpoints.Dstaddr = (*RawSockaddr)(addrp)
+ endpoints.Dstaddrlen = uint32(addrlen)
+ }
+
+ err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
+ return
+}
+
+//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
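
Connectx wraps the Darwin connectx(2) call on top of the SaEndpoints/SaeAssocID/SaeConnID types and SAE_* constants added in this change. A hedged, macOS-only sketch of a plain connect through it (the peer address is a placeholder):

//go:build darwin

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	dst := &unix.SockaddrInet4{Port: 80, Addr: [4]byte{93, 184, 216, 34}} // placeholder peer
	// No source interface/address, no payload, no flags: a plain connect via connectx.
	n, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("connectx enqueued %d bytes", n)
}
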
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
index ba46651f8e..a6a2d2fc2b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -11,6 +11,7 @@ package unix
int ioctl(int, unsigned long int, uintptr_t);
*/
import "C"
+import "unsafe"
func ioctl(fd int, req uint, arg uintptr) (err error) {
r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
index 4308ac1772..d73c4652e6 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
@@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
+ CONNECT_DATA_AUTHENTICATED = 0x4
+ CONNECT_DATA_IDEMPOTENT = 0x2
+ CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@@ -1265,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
+ SAE_ASSOCID_ALL = 0xffffffff
+ SAE_ASSOCID_ANY = 0x0
+ SAE_CONNID_ALL = 0xffffffff
+ SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
index c8068a7a16..4a55a40058 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
+ CONNECT_DATA_AUTHENTICATED = 0x4
+ CONNECT_DATA_IDEMPOTENT = 0x2
+ CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@@ -1265,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
+ SAE_ASSOCID_ALL = 0xffffffff
+ SAE_ASSOCID_ANY = 0x0
+ SAE_CONNID_ALL = 0xffffffff
+ SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
index da08b2ab3d..1ec2b1407b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
@@ -581,6 +581,8 @@ const (
AT_EMPTY_PATH = 0x1000
AT_REMOVEDIR = 0x200
RENAME_NOREPLACE = 1 << 0
+ ST_RDONLY = 1
+ ST_NOSUID = 2
)
const (
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index b622533ef2..24b346e1a3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
+ var _p0 unsafe.Pointer
+ if len(iov) > 0 {
+ _p0 = unsafe.Pointer(&iov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_connectx_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index cfe6646baf..ebd213100b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connectx(SB)
+GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
+DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 13f624f69f..824b9c2d5e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
+ var _p0 unsafe.Pointer
+ if len(iov) > 0 {
+ _p0 = unsafe.Pointer(&iov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_connectx_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index fe222b75df..4f178a2293 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connectx(SB)
+GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
+DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 091d107f3a..d003c3d437 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
+type SaeAssocID uint32
+
+type SaeConnID uint32
+
+type SaEndpoints struct {
+ Srcif uint32
+ Srcaddr *RawSockaddr
+ Srcaddrlen uint32
+ Dstaddr *RawSockaddr
+ Dstaddrlen uint32
+ _ [4]byte
+}
+
type Xucred struct {
Version uint32
Uid uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index 28ff4ef74d..0d45a941aa 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
+type SaeAssocID uint32
+
+type SaeConnID uint32
+
+type SaEndpoints struct {
+ Srcif uint32
+ Srcaddr *RawSockaddr
+ Srcaddrlen uint32
+ Dstaddr *RawSockaddr
+ Dstaddrlen uint32
+ _ [4]byte
+}
+
type Xucred struct {
Version uint32
Uid uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index 6cbd094a3a..51e13eb055 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -625,6 +625,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index 7c03b6ee77..d002d8ef3c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -630,6 +630,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index 422107ee8b..3f863d898d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -616,6 +616,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
index 505a12acfd..61c7293106 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
@@ -610,6 +610,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
index cc986c7900..b5d17414f0 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
@@ -612,6 +612,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 7f1961b907..9f2550dc31 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -2486,7 +2486,7 @@ type XDPMmapOffsets struct {
type XDPUmemReg struct {
Addr uint64
Len uint64
- Chunk_size uint32
+ Size uint32
Headroom uint32
Flags uint32
Tx_metadata_len uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 15adc04142..ad05b51a60 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -727,6 +727,37 @@ const (
RISCV_HWPROBE_EXT_ZBA = 0x8
RISCV_HWPROBE_EXT_ZBB = 0x10
RISCV_HWPROBE_EXT_ZBS = 0x20
+ RISCV_HWPROBE_EXT_ZICBOZ = 0x40
+ RISCV_HWPROBE_EXT_ZBC = 0x80
+ RISCV_HWPROBE_EXT_ZBKB = 0x100
+ RISCV_HWPROBE_EXT_ZBKC = 0x200
+ RISCV_HWPROBE_EXT_ZBKX = 0x400
+ RISCV_HWPROBE_EXT_ZKND = 0x800
+ RISCV_HWPROBE_EXT_ZKNE = 0x1000
+ RISCV_HWPROBE_EXT_ZKNH = 0x2000
+ RISCV_HWPROBE_EXT_ZKSED = 0x4000
+ RISCV_HWPROBE_EXT_ZKSH = 0x8000
+ RISCV_HWPROBE_EXT_ZKT = 0x10000
+ RISCV_HWPROBE_EXT_ZVBB = 0x20000
+ RISCV_HWPROBE_EXT_ZVBC = 0x40000
+ RISCV_HWPROBE_EXT_ZVKB = 0x80000
+ RISCV_HWPROBE_EXT_ZVKG = 0x100000
+ RISCV_HWPROBE_EXT_ZVKNED = 0x200000
+ RISCV_HWPROBE_EXT_ZVKNHA = 0x400000
+ RISCV_HWPROBE_EXT_ZVKNHB = 0x800000
+ RISCV_HWPROBE_EXT_ZVKSED = 0x1000000
+ RISCV_HWPROBE_EXT_ZVKSH = 0x2000000
+ RISCV_HWPROBE_EXT_ZVKT = 0x4000000
+ RISCV_HWPROBE_EXT_ZFH = 0x8000000
+ RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000
+ RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000
+ RISCV_HWPROBE_EXT_ZVFH = 0x40000000
+ RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000
+ RISCV_HWPROBE_EXT_ZFA = 0x100000000
+ RISCV_HWPROBE_EXT_ZTSO = 0x200000000
+ RISCV_HWPROBE_EXT_ZACAS = 0x400000000
+ RISCV_HWPROBE_EXT_ZICOND = 0x800000000
+ RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000
RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5
RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0
RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1
@@ -734,4 +765,6 @@ const (
RISCV_HWPROBE_MISALIGNED_FAST = 0x3
RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4
RISCV_HWPROBE_MISALIGNED_MASK = 0x7
+ RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6
+ RISCV_HWPROBE_WHICH_CPUS = 0x1
)
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 1fa34fd17c..5cee9a3143 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition
+//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP
+//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP
+//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP
+//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
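The four code-page declarations above add straightforward kernel32 wrappers. As a hedged usage sketch (not code from this repository; the program and constant name are illustrative), a Windows program could switch its console output code page to UTF-8 and restore the previous value on exit:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

const cpUTF8 = 65001 // CP_UTF8

func main() {
	// Remember the current output code page so it can be restored on exit.
	old, err := windows.GetConsoleOutputCP()
	if err != nil {
		fmt.Println("GetConsoleOutputCP:", err)
		return
	}
	defer windows.SetConsoleOutputCP(old)

	// Switch the console to UTF-8 for the lifetime of the program.
	if err := windows.SetConsoleOutputCP(cpUTF8); err != nil {
		fmt.Println("SetConsoleOutputCP:", err)
		return
	}
	fmt.Println("console output code page is now UTF-8")
}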
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 3f03b3d57c..7b97a154c9 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1060,6 +1060,7 @@ const (
SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6
SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4
SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12
+ SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15
// cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 9bb979a3e4..4c2e1bdc01 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -247,7 +247,9 @@ var (
procGetCommandLineW = modkernel32.NewProc("GetCommandLineW")
procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW")
procGetComputerNameW = modkernel32.NewProc("GetComputerNameW")
+ procGetConsoleCP = modkernel32.NewProc("GetConsoleCP")
procGetConsoleMode = modkernel32.NewProc("GetConsoleMode")
+ procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP")
procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo")
procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW")
procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId")
@@ -347,8 +349,10 @@ var (
procSetCommMask = modkernel32.NewProc("SetCommMask")
procSetCommState = modkernel32.NewProc("SetCommState")
procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts")
+ procSetConsoleCP = modkernel32.NewProc("SetConsoleCP")
procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition")
procSetConsoleMode = modkernel32.NewProc("SetConsoleMode")
+ procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP")
procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW")
procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories")
procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW")
@@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) {
return
}
+func GetConsoleCP() (cp uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ cp = uint32(r0)
+ if cp == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetConsoleMode(console Handle, mode *uint32) (err error) {
r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
if r1 == 0 {
@@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) {
return
}
+func GetConsoleOutputCP() (cp uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+ cp = uint32(r0)
+ if cp == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
if r1 == 0 {
@@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
return
}
+func SetConsoleCP(cp uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func setConsoleCursorPosition(console Handle, position uint32) (err error) {
r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
if r1 == 0 {
@@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) {
return
}
+func SetConsoleOutputCP(cp uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func SetCurrentDirectory(path *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if r1 == 0 {
diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go
index 465f560604..df6bf948e1 100644
--- a/vendor/golang.org/x/term/term_windows.go
+++ b/vendor/golang.org/x/term/term_windows.go
@@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) {
return nil, err
}
raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+ raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
return nil, err
}
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index c2b4b711b5..8f7afcb5df 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -82,7 +82,7 @@ type DriverResponse struct {
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
// findExternalDriver returns the file path of a tool that supplies
-// the build system package structure, or "" if not found."
+// the build system package structure, or "" if not found.
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
func findExternalDriver(cfg *Config) driver {
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go
index c24c2eee45..f7798e3354 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go
@@ -22,11 +22,17 @@ import (
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
// function is expensive and should be called once per task (e.g.
// package import), not once per call to NewAlias.
-func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
+//
+// Precondition: enabled || len(tparams)==0.
+// If materialized aliases are disabled, there must not be any type parameters.
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
if enabled {
tname := types.NewTypeName(pos, pkg, name, nil)
- newAlias(tname, rhs)
+ newAlias(tname, rhs, tparams)
return tname
}
+ if len(tparams) > 0 {
+ panic("cannot create an alias with type parameters when gotypesalias is not enabled")
+ }
return types.NewTypeName(pos, pkg, name, rhs)
}
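NewAlias now threads type parameters through to the materialized alias, with the documented precondition that they may only be supplied when aliases are enabled. A minimal sketch of the new call shape, assuming access to the aliases package (it is internal to x/tools, so the import is illustrative only); declareAliases, MyInt, Seq, and T are made-up names:

package sketch

import (
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/aliases"
)

func declareAliases(enabled bool, pkg *types.Package) {
	// type MyInt = int: no type parameters, valid whether or not
	// materialized aliases are enabled.
	aliases.NewAlias(enabled, token.NoPos, pkg, "MyInt", types.Typ[types.Int], nil)

	// type Seq[T any] = []T: only legal when materialized aliases are
	// enabled; otherwise NewAlias panics per the precondition above.
	if enabled {
		anyType := types.Universe.Lookup("any").Type()
		t := types.NewTypeParam(types.NewTypeName(token.NoPos, pkg, "T", nil), anyType)
		aliases.NewAlias(enabled, token.NoPos, pkg, "Seq", types.NewSlice(t), []*types.TypeParam{t})
	}
}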
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
index 6652f7db0f..a775fcc4be 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
@@ -27,7 +27,9 @@ func Origin(alias *Alias) *Alias { panic("unreachabl
// Unalias returns the type t for go <=1.21.
func Unalias(t types.Type) types.Type { return t }
-func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
+func newAlias(name *types.TypeName, rhs types.Type, tparams []*types.TypeParam) *Alias {
+ panic("unreachable")
+}
// Enabled reports whether [NewAlias] should create [types.Alias] types.
//
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
index 3ef1afeb40..31c159e42e 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
@@ -70,10 +70,9 @@ func Unalias(t types.Type) types.Type { return types.Unalias(t) }
// newAlias is an internal alias around types.NewAlias.
// Direct usage is discouraged at the moment.
// Try to use NewAlias instead.
-func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
+func newAlias(tname *types.TypeName, rhs types.Type, tparams []*types.TypeParam) *Alias {
a := types.NewAlias(tname, rhs)
- // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
- Unalias(a)
+ SetTypeParams(a, tparams)
return a
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index deeb67f315..5f283281a2 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -2,9 +2,227 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Indexed binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
-// see that file for specification of the format.
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F' or 'G'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T' or 'U'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A' or 'B'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'B'
+// Type typeOff
+// }
+//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Implicit bool
+// Constraint typeOff
+// }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+// Tag itag // unionType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
package gcimporter
@@ -523,9 +741,22 @@ func (p *iexporter) doDecl(obj types.Object) {
}
if obj.IsAlias() {
- w.tag(aliasTag)
+ alias, materialized := t.(*aliases.Alias) // may fail when aliases are not enabled
+
+ var tparams *types.TypeParamList
+ if materialized {
+ tparams = aliases.TypeParams(alias)
+ }
+ if tparams.Len() == 0 {
+ w.tag(aliasTag)
+ } else {
+ w.tag(genericAliasTag)
+ }
w.pos(obj.Pos())
- if alias, ok := t.(*aliases.Alias); ok {
+ if tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ if materialized {
// Preserve materialized aliases,
// even of non-exported types.
t = aliases.Rhs(alias)
@@ -745,7 +976,13 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
}
switch t := t.(type) {
case *aliases.Alias:
- // TODO(adonovan): support parameterized aliases, following *types.Named.
+ if targs := aliases.TypeArgs(t); targs.Len() > 0 {
+ w.startType(instanceType)
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(aliases.Origin(t), pkg)
+ return
+ }
w.startType(aliasType)
w.qualifiedType(t.Obj())
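The format comment added above leans heavily on uvarints and offsets into the Strings section. A small self-contained sketch (not code from this package) of how a stringOff is resolved against that layout:

package main

import (
	"encoding/binary"
	"fmt"
)

// readString resolves a stringOff against the Strings section: the offset
// points at a uvarint length, followed by that many bytes of string data.
func readString(strings []byte, off uint64) string {
	n, w := binary.Uvarint(strings[off:])
	start := off + uint64(w)
	return string(strings[start : start+n])
}

func main() {
	// Build a toy Strings section holding "hello" followed by "gcimporter".
	var section []byte
	var buf [binary.MaxVarintLen64]byte
	for _, s := range []string{"hello", "gcimporter"} {
		n := binary.PutUvarint(buf[:], uint64(len(s)))
		section = append(section, buf[:n]...)
		section = append(section, s...)
	}

	fmt.Println(readString(section, 0)) // hello
	fmt.Println(readString(section, 6)) // gcimporter (1-byte length + 5 bytes of "hello")
}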
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 136aa03653..ed2d562959 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Indexed package import.
-// See cmd/compile/internal/gc/iexport.go for the export data format.
+// See iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
@@ -562,14 +562,14 @@ func (r *importReader) obj(name string) {
pos := r.pos()
switch tag {
- case aliasTag:
+ case aliasTag, genericAliasTag:
+ var tparams []*types.TypeParam
+ if tag == genericAliasTag {
+ tparams = r.tparamList()
+ }
typ := r.typ()
- // TODO(adonovan): support generic aliases:
- // if tag == genericAliasTag {
- // tparams := r.tparamList()
- // alias.SetTypeParams(tparams)
- // }
- r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ))
+ obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
+ r.declare(obj)
case constTag:
typ, val := r.value()
@@ -862,7 +862,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) (res types.Type) {
k := r.kind()
if debug {
- r.p.trace("importing type %d (base: %s)", k, base)
+ r.p.trace("importing type %d (base: %v)", k, base)
r.p.indent++
defer func() {
r.p.indent--
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 2c07706887..f0742f5404 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) {
// See cmd/compile/internal/noder.derivedInfo.
type derivedInfo struct {
- idx pkgbits.Index
- needed bool
+ idx pkgbits.Index
}
// See cmd/compile/internal/noder.typeInfo.
@@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
- r.Bool() // has init
+ if r.Version().Has(pkgbits.HasInit) {
+ r.Bool()
+ }
for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
// to avoid eager loading of imports.
r.Sync(pkgbits.SyncObject)
- assert(!r.Bool())
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
r.p.objIdx(r.Reloc(pkgbits.RelocObj))
assert(r.Len() == 0)
}
@@ -165,7 +168,7 @@ type readerDict struct {
// tparams is a slice of the constructed TypeParams for the element.
tparams []*types.TypeParam
- // devived is a slice of types derived from tparams, which may be
+ // derived is a slice of types derived from tparams, which may be
// instantiated while reading the current element.
derived []derivedInfo
derivedTypes []types.Type // lazily instantiated from derived
@@ -471,7 +474,9 @@ func (r *reader) param() *types.Var {
func (r *reader) obj() (types.Object, []types.Type) {
r.Sync(pkgbits.SyncObject)
- assert(!r.Bool())
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
obj := pkgScope(pkg).Lookup(name)
@@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
case pkgbits.ObjAlias:
pos := r.pos()
+ var tparams []*types.TypeParam
+ if r.Version().Has(pkgbits.AliasTypeParamNames) {
+ tparams = r.typeParamNames()
+ }
typ := r.typ()
- declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
+ declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
case pkgbits.ObjConst:
pos := r.pos()
@@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types.Type, len(dict.derived))
for i := range dict.derived {
- dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
+ if r.Version().Has(pkgbits.DerivedInfoNeeded) {
+ assert(!r.Bool())
+ }
}
pr.retireReader(r)
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 91221fda32..8555e3f83d 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
- return gmc
+ // golang/go#67156: ensure that the module cache is clean, since it is
+ // assumed as a prefix to directories scanned by gopathwalk, which are
+ // themselves clean.
+ return filepath.Clean(gmc)
}
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
@@ -740,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
subdir := ""
- if dir != root.Path {
- subdir = dir[len(root.Path)+len("/"):]
+ if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) {
+ subdir = dir[len(prefix):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
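The filepath.Clean call above (golang/go#67156) matters because scanDirForPackage now prefix-matches cleaned directories against root.Path plus a single separator. A self-contained illustration, assuming a Unix-style path (the paths are made up):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// A user-supplied GOMODCACHE with a trailing separator.
	raw := "/home/user/go/pkg/mod/"
	// A directory as reported by gopathwalk, which is already clean.
	dir := "/home/user/go/pkg/mod/golang.org/x/tools@v0.24.0"

	// Without Clean the computed prefix contains a doubled separator and never matches.
	fmt.Println(strings.HasPrefix(dir, raw+string(filepath.Separator)))                 // false
	fmt.Println(strings.HasPrefix(dir, filepath.Clean(raw)+string(filepath.Separator))) // true
}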
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index b92e8e6eb3..f6cb37c5c3 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -21,7 +21,7 @@ import (
// export data.
type PkgDecoder struct {
// version is the file format version.
- version uint32
+ version Version
// sync indicates whether the file uses sync markers.
sync bool
@@ -68,8 +68,6 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
// IR export data from input. pkgPath is the package path for the
// compilation unit that produced the export data.
-//
-// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
pr := PkgDecoder{
pkgPath: pkgPath,
@@ -80,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
r := strings.NewReader(input)
- assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+ var ver uint32
+ assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
+ pr.version = Version(ver)
- switch pr.version {
- default:
- panic(fmt.Errorf("unsupported version: %v", pr.version))
- case 0:
- // no flags
- case 1:
+ if pr.version >= numVersions {
+ panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
+ }
+
+ if pr.version.Has(Flags) {
var flags uint32
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
pr.sync = flags&flagSyncMarkers != 0
@@ -102,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
assert(err == nil)
pr.elemData = input[pos:]
- assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ const fingerprintSize = 8
+ assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
return pr
}
@@ -136,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
absIdx += int(pr.elemEndsEnds[k-1])
}
if absIdx >= int(pr.elemEndsEnds[k]) {
- errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
}
return absIdx
}
@@ -193,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
Idx: idx,
}
- // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
- r.Data = *strings.NewReader(pr.DataIdx(k, idx))
-
+ r.Data.Reset(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
r.Relocs = make([]RelocEnt, r.Len())
for i := range r.Relocs {
@@ -244,7 +243,7 @@ type Decoder struct {
func (r *Decoder) checkErr(err error) {
if err != nil {
- errorf("unexpected decoding error: %w", err)
+ panicf("unexpected decoding error: %w", err)
}
}
@@ -515,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
return path, name, tag
}
+
+// Version reports the version of the bitstream.
+func (w *Decoder) Version() Version { return w.common.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
index 6482617a4f..c17a12399d 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -12,18 +12,15 @@ import (
"io"
"math/big"
"runtime"
+ "strings"
)
-// currentVersion is the current version number.
-//
-// - v0: initial prototype
-//
-// - v1: adds the flags uint32 word
-const currentVersion uint32 = 1
-
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
+ // version of the bitstream.
+ version Version
+
// elems holds the bitstream for previously encoded elements.
elems [numRelocs][]string
@@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
// export data files, but can help diagnosing desync errors in
// higher-level Unified IR reader/writer code. If syncFrames is
// negative, then sync markers are omitted entirely.
-func NewPkgEncoder(syncFrames int) PkgEncoder {
+func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
return PkgEncoder{
+ version: version,
stringsIdx: make(map[string]Index),
syncFrames: syncFrames,
}
@@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
- writeUint32(currentVersion)
+ writeUint32(uint32(pw.version))
- var flags uint32
- if pw.SyncMarkers() {
- flags |= flagSyncMarkers
+ if pw.version.Has(Flags) {
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
}
- writeUint32(flags)
// Write elemEndsEnds.
var sum uint32
@@ -159,7 +159,7 @@ type Encoder struct {
// Flush finalizes the element's bitstream and returns its Index.
func (w *Encoder) Flush() Index {
- var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+ var sb strings.Builder
// Backup the data so we write the relocations at the front.
var tmp bytes.Buffer
@@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
func (w *Encoder) checkErr(err error) {
if err != nil {
- errorf("unexpected encoding error: %v", err)
+ panicf("unexpected encoding error: %v", err)
}
}
@@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
// section (if not already present), and then writing a relocation
// into the element bitstream.
func (w *Encoder) String(s string) {
+ w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
w.Sync(SyncString)
- w.Reloc(RelocString, w.p.StringIdx(s))
+ w.Reloc(RelocString, idx)
}
// Strings encodes and writes a variable-length slice of strings into
@@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
func (w *Encoder) scalar(val constant.Value) {
switch v := constant.Val(val).(type) {
default:
- errorf("unhandled %v (%v)", val, val.Kind())
+ panicf("unhandled %v (%v)", val, val.Kind())
case bool:
w.Code(ValBool)
w.Bool(v)
@@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
b := v.Append(nil, 'p', -1)
w.String(string(b)) // TODO: More efficient encoding.
}
+
+// Version reports the version of the bitstream.
+func (w *Encoder) Version() Version { return w.p.version }
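String is now a thin composition of StringIdx and the new StringRef, which lets a caller intern a frequently used string once and emit cheap references to it. A hedged sketch of that split (pkgbits is internal to x/tools, so the import is purely illustrative, and writeNames is a made-up helper):

package sketch

import "golang.org/x/tools/internal/pkgbits"

func writeNames(pw *pkgbits.PkgEncoder, w *pkgbits.Encoder, names []string) {
	// Intern the package path once in the string section...
	idx := pw.StringIdx("example.com/demo")
	for range names {
		// ...and reference it by index for every element that needs it.
		w.StringRef(idx)
	}
	for _, n := range names {
		w.String(n) // equivalent to w.StringRef(pw.StringIdx(n))
	}
}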
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
deleted file mode 100644
index 5294f6a63e..0000000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.7
-// +build !go1.7
-
-// TODO(mdempsky): Remove after #44505 is resolved
-
-package pkgbits
-
-import "runtime"
-
-func walkFrames(pcs []uintptr, visit frameVisitor) {
- for _, pc := range pcs {
- fn := runtime.FuncForPC(pc)
- file, line := fn.FileLine(pc)
-
- visit(file, line, fn.Name(), pc-fn.Entry())
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
deleted file mode 100644
index 2324ae7adf..0000000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.7
-// +build go1.7
-
-package pkgbits
-
-import "runtime"
-
-// walkFrames calls visit for each call frame represented by pcs.
-//
-// pcs should be a slice of PCs, as returned by runtime.Callers.
-func walkFrames(pcs []uintptr, visit frameVisitor) {
- if len(pcs) == 0 {
- return
- }
-
- frames := runtime.CallersFrames(pcs)
- for {
- frame, more := frames.Next()
- visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
- if !more {
- return
- }
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
index ad26d3b28c..50534a2955 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/support.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -12,6 +12,6 @@ func assert(b bool) {
}
}
-func errorf(format string, args ...interface{}) {
+func panicf(format string, args ...any) {
panic(fmt.Errorf(format, args...))
}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
index 5bd51ef717..1520b73afb 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -6,6 +6,7 @@ package pkgbits
import (
"fmt"
+ "runtime"
"strings"
)
@@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
type frameVisitor func(file string, line int, name string, offset uintptr)
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
+
// SyncMarker is an enum type that represents markers that may be
// written to export data to ensure the reader and writer stay
// synchronized.
@@ -110,4 +129,8 @@ const (
SyncStmtsEnd
SyncLabel
SyncOptLabel
+
+ SyncMultiExpr
+ SyncRType
+ SyncConvRTTI
)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
index 4a5b0ca5f2..582ad56d3e 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -74,11 +74,14 @@ func _() {
_ = x[SyncStmtsEnd-64]
_ = x[SyncLabel-65]
_ = x[SyncOptLabel-66]
+ _ = x[SyncMultiExpr-67]
+ _ = x[SyncRType-68]
+ _ = x[SyncConvRTTI-69]
}
-const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
-var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
func (i SyncMarker) String() string {
i -= 1
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
new file mode 100644
index 0000000000..53af9df22b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// Version indicates a version of a unified IR bitstream.
+// Each Version indicates the addition, removal, or change of
+// new data in the bitstream.
+//
+// These are serialized to disk and the interpretation remains fixed.
+type Version uint32
+
+const (
+ // V0: initial prototype.
+ //
+ // All data that is not assigned a Field is in version V0
+ // and has not been deprecated.
+ V0 Version = iota
+
+ // V1: adds the Flags uint32 word
+ V1
+
+ // V2: removes unused legacy fields and supports type parameters for aliases.
+ // - remove the legacy "has init" bool from the public root
+ // - remove obj's "derived func instance" bool
+ // - add a TypeParamNames field to ObjAlias
+ // - remove derived info "needed" bool
+ V2
+
+ numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+ // Flags is a uint32 in the header of a bitstream
+ // that is used to indicate whether optional features are enabled.
+ Flags Field = iota
+
+ // Deprecated: HasInit was a bool indicating whether a package
+ // has any init functions.
+ HasInit
+
+ // Deprecated: DerivedFuncInstance was a bool indicating
+ // whether an object was a function instance.
+ DerivedFuncInstance
+
+ // ObjAlias has a list of TypeParamNames.
+ AliasTypeParamNames
+
+ // Deprecated: DerivedInfoNeeded was a bool indicating
+ // whether a type was a derived type.
+ DerivedInfoNeeded
+
+ numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+ Flags: V1,
+ AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+ HasInit: V2,
+ DerivedFuncInstance: V2,
+ DerivedInfoNeeded: V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+ return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
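Has is the single gate used throughout the reader and writer, so it is worth seeing what it yields for the fields declared above. A self-contained mirror of those tables (illustration only, not the vendored code):

package main

import "fmt"

type Version uint32

const (
	V0 Version = iota
	V1
	V2
)

type Field int

const (
	Flags Field = iota
	HasInit
	DerivedFuncInstance
	AliasTypeParamNames
	DerivedInfoNeeded
	numFields = iota
)

var introduced = [numFields]Version{Flags: V1, AliasTypeParamNames: V2}
var removed = [numFields]Version{HasInit: V2, DerivedFuncInstance: V2, DerivedInfoNeeded: V2}

// Has reports whether field f is present in a bitstream at version v.
func (v Version) Has(f Field) bool {
	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}

func main() {
	for _, v := range []Version{V0, V1, V2} {
		// V0: the three legacy bools only.
		// V1: adds Flags.
		// V2: drops the legacy bools and adds AliasTypeParamNames.
		fmt.Println(v, v.Has(Flags), v.Has(HasInit), v.Has(AliasTypeParamNames))
	}
}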
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index a928acf29f..cdaac9ab34 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParseSessionState", Func, 21},
{"QUICClient", Func, 21},
{"QUICConfig", Type, 21},
- {"QUICConfig.EnableStoreSessionEvent", Field, 23},
+ {"QUICConfig.EnableSessionEvents", Field, 23},
{"QUICConfig.TLSConfig", Field, 21},
{"QUICConn", Type, 21},
{"QUICEncryptionLevel", Type, 21},
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 834e05381c..131caab284 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -838,7 +838,7 @@ const (
// InvalidCap occurs when an argument to the cap built-in function is not of
// supported type.
//
- // See https://golang.org/ref/spec#Lengthand_capacity for information on
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@@ -859,7 +859,7 @@ const (
// InvalidCopy occurs when the arguments are not of slice type or do not
// have compatible type.
//
- // See https://golang.org/ref/spec#Appendingand_copying_slices for more
+ // See https://golang.org/ref/spec#Appending_and_copying_slices for more
// information on the type requirements for the copy built-in.
//
// Example:
@@ -897,7 +897,7 @@ const (
// InvalidLen occurs when an argument to the len built-in function is not of
// supported type.
//
- // See https://golang.org/ref/spec#Lengthand_capacity for information on
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@@ -914,7 +914,7 @@ const (
// InvalidMake occurs when make is called with an unsupported type argument.
//
- // See https://golang.org/ref/spec#Makingslices_maps_and_channels for
+ // See https://golang.org/ref/spec#Making_slices_maps_and_channels for
// information on the types that may be created using make.
//
// Example:
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 6a8a07781a..5d4096d46a 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,28 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
+- [aranjans](https://github.com/aranjans), Google LLC
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
- [atollena](https://github.com/atollena), Datadog, Inc.
-- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [menghanl](https://github.com/menghanl), Google LLC
-- [srini100](https://github.com/srini100), Google LLC
+- [erm-g](https://github.com/erm-g), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
+- [purnesh42h](https://github.com/purnesh42h), Google LLC
+- [zasweq](https://github.com/zasweq), Google LLC
## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez), Google LLC
-- [canguler](https://github.com/canguler), Google LLC
-- [iamqizhao](https://github.com/iamqizhao), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
-- [jtattermusch](https://github.com/jtattermusch), Google LLC
-- [lyuxuan](https://github.com/lyuxuan), Google LLC
-- [makmukhi](https://github.com/makmukhi), Google LLC
-- [matt-kwong](https://github.com/matt-kwong), Google LLC
-- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
-- [yongni](https://github.com/yongni), Google LLC
+- [adelez](https://github.com/adelez)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
index be6e108705..abab279379 100644
--- a/vendor/google.golang.org/grpc/SECURITY.md
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -1,3 +1,3 @@
# Security Policy
-For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
index 0787d0b50c..d7b40b7cb6 100644
--- a/vendor/google.golang.org/grpc/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}
-// DefaultConfig is a backoff configuration with the default values specfied
+// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index f391744f72..b181f386a1 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
@@ -72,8 +73,21 @@ func unregisterForTesting(name string) {
delete(m, name)
}
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+ return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+ scs.connectedAddress = addr
+}
+
func init() {
internal.BalancerUnregister = unregisterForTesting
+ internal.ConnectedAddress = connectedAddress
+ internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.
@@ -243,6 +257,10 @@ type BuildOptions struct {
// same resolver.Target as passed to the resolver. See the documentation for
// the resolver.Target type for details about what it contains.
Target resolver.Target
+ // MetricsRecorder is the metrics recorder that balancers can use to record
+ // metrics. Balancer implementations that do not register metrics on the
+ // metrics registry and record on them can ignore this field.
+ MetricsRecorder estats.MetricsRecorder
}
// Builder creates a balancer.
@@ -410,6 +428,9 @@ type SubConnState struct {
// ConnectionError is set if the ConnectivityState is TransientFailure,
// describing the reason the SubConn failed. Otherwise, it is nil.
ConnectionError error
+ // connectedAddress contains the connected address when ConnectivityState is
+ // Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address
}
// ClientConnState describes the state of a ClientConn relevant to the
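BuildOptions now carries a MetricsRecorder, which custom LB policies can capture at build time. A hedged sketch of a balancer that does so (the policy name, package, and stub methods are illustrative; actual recording against the experimental stats registry is elided):

package custombalancer

import (
	"google.golang.org/grpc/balancer"
	estats "google.golang.org/grpc/experimental/stats"
)

func init() { balancer.Register(builder{}) }

type builder struct{}

func (builder) Name() string { return "example_custom_lb" }

func (builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	// Stash the recorder; it can later be used with metrics registered on
	// the experimental stats registry.
	return &bal{cc: cc, metrics: opts.MetricsRecorder}
}

type bal struct {
	cc      balancer.ClientConn
	metrics estats.MetricsRecorder
}

func (b *bal) UpdateClientConnState(balancer.ClientConnState) error       { return nil }
func (b *bal) ResolverError(error)                                        {}
func (b *bal) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {}
func (b *bal) Close()                                                     {}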
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index a7f1eeec8e..2b87bd79c7 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -36,7 +36,7 @@ type baseBuilder struct {
config Config
}
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
@@ -259,6 +259,6 @@ type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 07527603f1..4d69b4052f 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -50,7 +50,7 @@ const (
type pickfirstBuilder struct{}
-func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
@@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
- // target do not forwarrd the corresponding correct endpoints down/split
+ // target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 4161fdf47a..8ad6ce2f09 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -25,12 +25,15 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
@@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParent: cc.channelz,
Target: cc.parsedTarget,
+ MetricsRecorder: cc.metricsRecorderList,
},
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
@@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
errCh := make(chan error)
- ok := ccb.serializer.Schedule(func(ctx context.Context) {
+ uccs := func(ctx context.Context) {
defer close(errCh)
if ctx.Err() != nil || ccb.balancer == nil {
return
@@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
errCh <- err
- })
- if !ok {
- return nil
}
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+ // callback), we have to use the ScheduleOr method and not the TrySchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
return <-errCh
}
// resolverError is invoked by grpc to push a resolver error to the underlying
// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() {
ccb.closed = true
ccb.mu.Unlock()
channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
- ccb.serializer.Schedule(func(context.Context) {
+ ccb.serializer.TrySchedule(func(context.Context) {
if ccb.balancer == nil {
return
}
@@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() {
// exitIdle invokes the balancer's exitIdle method in the serializer.
func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
return acbw, nil
}
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
// The graceful switch balancer will never call this.
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
}
@@ -252,15 +262,29 @@ type acBalancerWrapper struct {
// updateState is invoked by grpc to push a subConn state update to the
// underlying balancer.
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
- acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
}
// Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
// TODO: delete this comment when UpdateSubConnState is removed.
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+ if s == connectivity.Ready {
+ setConnectedAddress(&scs, curAddr)
+ }
+ acbw.stateListener(scs)
+ acbw.ac.mu.Lock()
+ defer acbw.ac.mu.Unlock()
+ if s == connectivity.Ready {
+ // When changing states to READY, reset stateReadyChan. Wait until
+ // after we notify the LB policy's listener(s) in order to prevent
+ // ac.getTransport() from unblocking before the LB policy starts
+ // tracking the subchannel as READY.
+ close(acbw.ac.stateReadyChan)
+ acbw.ac.stateReadyChan = make(chan struct{})
+ }
})
}
@@ -318,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
- p, close := pb.Build(acbw)
- pData = &refCountedProducer{producer: p, close: close}
+ p, closeFn := pb.Build(acbw)
+ pData = &refCountedProducer{producer: p, close: closeFn}
acbw.producers[pb] = pData
}
// Account for this new reference.
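The ScheduleOr commentary above is the crux of the change: when the update races with close, the failure callback still releases the caller blocked on errCh. A minimal sketch of that contract (not the grpcsync implementation; the serializer type here is made up):

package main

import "fmt"

type serializer struct {
	closed bool
	queue  []func()
}

// ScheduleOr queues f, or runs onFailure synchronously when the serializer
// has already been closed.
func (s *serializer) ScheduleOr(f func(), onFailure func()) {
	if s.closed {
		onFailure()
		return
	}
	s.queue = append(s.queue, f)
}

func main() {
	s := &serializer{closed: true} // simulate losing the race with Close

	errCh := make(chan error, 1)
	s.ScheduleOr(
		func() { errCh <- nil; close(errCh) }, // normal path: report the update result
		func() { close(errCh) },               // failure path: still unblock the reader
	)

	err, ok := <-errCh
	fmt.Println(err, ok) // <nil> false: the channel was closed without a value
}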
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 63c639e4fe..55bffaa77e 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,8 +18,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1
@@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
(GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
(GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
(Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
@@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GrpcLogEntry); i {
case 0:
return &v.state
@@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ClientHeader); i {
case 0:
return &v.state
@@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ServerHeader); i {
case 0:
return &v.state
@@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Trailer); i {
case 0:
return &v.state
@@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Message); i {
case 0:
return &v.state
@@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Metadata); i {
case 0:
return &v.state
@@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*MetadataEntry); i {
case 0:
return &v.state
@@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*Address); i {
case 0:
return &v.state
@@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
}
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 423be7b43b..9c8850e3fd 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -24,6 +24,7 @@ import (
"fmt"
"math"
"net/url"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -39,6 +40,7 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+ cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
return cc, nil
}
@@ -590,13 +595,14 @@ type ClientConn struct {
cancel context.CancelFunc // Cancelled on close.
// The following are initialized at dial time, and are read-only after that.
- target string // User's dial target.
- parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
- authority string // See initAuthority().
- dopts dialOptions // Default and user specified dial options.
- channelz *channelz.Channel // Channelz object.
- resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
- idlenessMgr *idle.Manager
+ target string // User's dial target.
+ parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
+ authority string // See initAuthority().
+ dopts dialOptions // Default and user specified dial options.
+ channelz *channelz.Channel // Channelz object.
+ resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
+ idlenessMgr *idle.Manager
+ metricsRecorderList *stats.MetricsRecorderList
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@@ -626,11 +632,6 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
}
// GetState returns the connectivity.State of ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
-// release.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
@@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
cc.csMgr.updateState(connectivity.TransientFailure)
}
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
out := make([]resolver.Address, len(in))
- for i := range in {
- out[i] = in[i]
- out[i].BalancerAttributes = nil
- }
+ copy(out, in)
return out
}
@@ -835,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
}
ac := &addrConn{
- state: connectivity.Idle,
- cc: cc,
- addrs: copyAddressesWithoutBalancerAttributes(addrs),
- scopts: opts,
- dopts: cc.dopts,
- channelz: channelz.RegisterSubChannel(cc.channelz, ""),
- resetBackoff: make(chan struct{}),
- stateChan: make(chan struct{}),
+ state: connectivity.Idle,
+ cc: cc,
+ addrs: copyAddresses(addrs),
+ scopts: opts,
+ dopts: cc.dopts,
+ channelz: channelz.RegisterSubChannel(cc.channelz, ""),
+ resetBackoff: make(chan struct{}),
+ stateReadyChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
@@ -918,28 +908,29 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
- ac.mu.Unlock()
- ac.resetTransport()
+ ac.resetTransportAndUnlock()
return nil
}
-func equalAddresses(a, b []resolver.Address) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if !v.Equal(b[i]) {
- return false
- }
- }
- return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
}
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- addrs = copyAddressesWithoutBalancerAttributes(addrs)
+ addrs = copyAddresses(addrs)
limit := len(addrs)
if limit > 5 {
limit = 5
@@ -947,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
ac.mu.Lock()
- if equalAddresses(ac.addrs, addrs) {
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
ac.mu.Unlock()
return
}
@@ -966,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
// Try to find the connected address.
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
- if a.Equal(ac.curAddr) {
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
// We are connected to a valid address, so do nothing but
// update the addresses.
ac.mu.Unlock()
@@ -992,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.updateConnectivityState(connectivity.Idle, nil)
}
- ac.mu.Unlock()
-
// Since we were connecting/connected, we should start a new connection
// attempt.
- go ac.resetTransport()
+ go ac.resetTransportAndUnlock()
}
// getServerName determines the serverName to be used in the connection
@@ -1190,8 +1179,8 @@ type addrConn struct {
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
- stateChan chan struct{} // closed and recreated on every state change.
+ state connectivity.State
+ stateReadyChan chan struct{} // closed and recreated on every READY state change.
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
- // When changing states, reset the state change channel.
- close(ac.stateChan)
- ac.stateChan = make(chan struct{})
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
} else {
channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.acbw.updateState(s, lastErr)
+ ac.acbw.updateState(s, ac.curAddr, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
}
}
-func (ac *addrConn) resetTransport() {
- ac.mu.Lock()
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
acCtx := ac.ctx
if acCtx.Err() != nil {
ac.mu.Unlock()
@@ -1522,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
for ctx.Err() == nil {
ac.mu.Lock()
- t, state, sc := ac.transport, ac.state, ac.stateChan
+ t, state, sc := ac.transport, ac.state, ac.stateReadyChan
ac.mu.Unlock()
if state == connectivity.Ready {
return t, nil
@@ -1585,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
- // closing of transports is also taken care of by cancelation of cc.ctx.
+ // closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport
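The new equalAddressesIgnoringBalAttributes helper leans on slices.EqualFunc from the Go 1.21+ standard library. A short stand-alone sketch of the same element-wise comparison, using a simplified address type instead of resolver.Address:

    package main

    import (
        "fmt"
        "slices"
    )

    // addr is a cut-down stand-in for resolver.Address.
    type addr struct {
        Addr       string
        ServerName string
    }

    // equalIgnoringExtras compares only the fields that matter to a subchannel,
    // analogous to equalAddressIgnoringBalAttributes in clientconn.go.
    func equalIgnoringExtras(a, b *addr) bool {
        return a.Addr == b.Addr && a.ServerName == b.ServerName
    }

    func main() {
        x := []addr{{Addr: "10.0.0.1:443", ServerName: "backend"}}
        y := []addr{{Addr: "10.0.0.1:443", ServerName: "backend"}}
        eq := slices.EqualFunc(x, y, func(a, b addr) bool {
            return equalIgnoringExtras(&a, &b)
        })
        fmt.Println(eq) // true: lengths match and every pair compares equal
    }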
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 411e3dfd47..e840858b77 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
import (
"google.golang.org/grpc/encoding"
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+ "google.golang.org/grpc/mem"
)
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
type baseCodec interface {
- Marshal(v any) ([]byte, error)
- Unmarshal(data []byte, v any) error
+ Marshal(v any) (mem.BufferSlice, error)
+ Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if a
+// codec is registered there, wraps it with newCodecV1Bridge to turn it into an
+// encoding.CodecV2. Otherwise it falls back to the V2 registry with
+// encoding.GetCodecV2, which returns nil if nothing is registered.
+func getCodec(name string) encoding.CodecV2 {
+ if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+ return newCodecV1Bridge(codecV1)
+ }
+
+ return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+ return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+ return codecV1Bridge{
+ codecV0Bridge: codecV0Bridge{codec: c},
+ name: c.Name(),
+ }
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+ codec interface {
+ Marshal(v any) ([]byte, error)
+ Unmarshal(data []byte, v any) error
+ }
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+ data, err := c.codec.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ return c.codec.Unmarshal(data.Materialize(), v)
}
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+ codecV0Bridge
+ name string
+}
+
+func (c codecV1Bridge) Name() string {
+ return c.name
+}
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 82bee1443b..4c805c6446 100644
--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials {
// NoSecurity.
type insecureTC struct{}
-func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index f5453d48a5..2b285beee3 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -33,6 +33,7 @@ import (
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
)
@@ -60,7 +61,7 @@ func init() {
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
- internal.WithRecvBufferPool = withRecvBufferPool
+ internal.WithBufferPool = withBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -92,7 +93,6 @@ type dialOptions struct {
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
idleTimeout time.Duration
- recvBufferPool SharedBufferPool
defaultScheme string
maxCallAttempts int
}
@@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption {
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
+//
+// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
@@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions {
WriteBufferSize: defaultWriteBufSize,
UseProxy: true,
UserAgent: grpcUA,
+ BufferPool: mem.DefaultBufferPool(),
},
bs: internalbackoff.DefaultExponential,
healthCheckFunc: internal.HealthCheckFunc,
idleTimeout: 30 * time.Minute,
- recvBufferPool: nopBufferPool{},
defaultScheme: "dns",
maxCallAttempts: defaultMaxCallAttempts,
}
@@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption {
})
}
-// WithRecvBufferPool returns a DialOption that configures the ClientConn
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
- return withRecvBufferPool(bufferPool)
-}
-
-func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.recvBufferPool = bufferPool
+ o.copts.BufferPool = bufferPool
})
}
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 0022859ad7..e7b532b6f8 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,7 +16,7 @@
*
*/
-//go:generate ./regenerate.sh
+//go:generate ./scripts/regenerate.sh
/*
Package grpc implements an RPC system called gRPC.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 5ebf88d714..11d0ae142c 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -94,7 +94,7 @@ type Codec interface {
Name() string
}
-var registeredCodecs = make(map[string]Codec)
+var registeredCodecs = make(map[string]any)
// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
@@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) {
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
+ c, _ := registeredCodecs[contentSubtype].(Codec)
+ return c
}
diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 0000000000..074c5e234a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+ "strings"
+
+ "google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+ // Marshal returns the wire format of v. The buffers in the returned
+ // [mem.BufferSlice] must have at least one reference each, which will be freed
+ // by gRPC when they are no longer needed.
+ Marshal(v any) (out mem.BufferSlice, err error)
+ // Unmarshal parses the wire format into v. Note that data will be freed as soon
+ // as this function returns. If the codec wishes to guarantee access to the data
+ // after this function, it must take its own reference that it frees when it is
+ // no longer needed.
+ Unmarshal(data mem.BufferSlice, v any) error
+ // Name returns the name of the Codec implementation. The returned string
+ // will be used as part of content type in transmission. The result must be
+ // static; the result cannot change between calls.
+ Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+ if codec == nil {
+ panic("cannot register a nil CodecV2")
+ }
+ if codec.Name() == "" {
+ panic("cannot register CodecV2 with empty string result for Name()")
+ }
+ contentSubtype := strings.ToLower(codec.Name())
+ registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+ c, _ := registeredCodecs[contentSubtype].(CodecV2)
+ return c
+}
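As a usage illustration of the registration surface added above, the sketch below shows a hypothetical JSON codec implementing CodecV2 and registering itself from init(). The mem helpers (SliceBuffer, Materialize) are the same ones used by the bridge code in codec.go; the codec name and package are assumptions for the example, not part of this change.

    package jsoncodec

    import (
        "encoding/json"

        "google.golang.org/grpc/encoding"
        "google.golang.org/grpc/mem"
    )

    type codec struct{}

    func init() {
        // Registration must happen at init time; the name is looked up as the
        // content-subtype (e.g. application/grpc+json).
        encoding.RegisterCodecV2(codec{})
    }

    // Marshal returns the encoded bytes wrapped in a single-element BufferSlice.
    func (codec) Marshal(v any) (mem.BufferSlice, error) {
        b, err := json.Marshal(v)
        if err != nil {
            return nil, err
        }
        return mem.BufferSlice{mem.SliceBuffer(b)}, nil
    }

    // Unmarshal flattens the BufferSlice before decoding; data is only valid
    // for the duration of the call, so no reference to it is retained.
    func (codec) Unmarshal(data mem.BufferSlice, v any) error {
        return json.Unmarshal(data.Materialize(), v)
    }

    func (codec) Name() string { return "json" }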
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66d5cdf03e..ceec319dd2 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"fmt"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/protoadapt"
)
@@ -32,28 +33,51 @@ import (
const Name = "proto"
func init() {
- encoding.RegisterCodec(codec{})
+ encoding.RegisterCodecV2(&codecV2{})
}
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec for
+// gRPC.
+type codecV2 struct{}
-func (codec) Marshal(v any) ([]byte, error) {
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
vv := messageV2Of(v)
if vv == nil {
- return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+ return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+ size := proto.Size(vv)
+ if mem.IsBelowBufferPoolingThreshold(size) {
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ } else {
+ pool := mem.DefaultBufferPool()
+ buf := pool.Get(size)
+ if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ data = append(data, mem.NewBuffer(buf, pool))
+ }
+
+ return data, nil
}
-func (codec) Unmarshal(data []byte, v any) error {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
vv := messageV2Of(v)
if vv == nil {
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
}
- return proto.Unmarshal(data, vv)
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ defer buf.Free()
+ // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+ // really possible without a major overhaul of the proto package, but the
+ // vtprotobuf library may be able to support this.
+ return proto.Unmarshal(buf.ReadOnlyData(), vv)
}
func messageV2Of(v any) proto.Message {
@@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
return nil
}
-func (codec) Name() string {
+func (c *codecV2) Name() string {
return Name
}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 0000000000..1d827dd5d9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,269 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "maps"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+)
+
+func init() {
+ internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+ // The name of this metric. This name must be unique across the whole binary
+ // (including any per call metrics). See
+ // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+ // for metric naming conventions.
+ Name Metric
+ // The description of this metric.
+ Description string
+ // The unit (e.g. entries, seconds) of this metric.
+ Unit string
+ // The required label keys for this metric. These are intended to be
+ // attached to metrics emitted from a stats handler.
+ Labels []string
+ // The optional label keys for this metric. These are intended to be attached
+ // to metrics emitted from a stats handler if configured.
+ OptionalLabels []string
+ // Whether this metric is on by default.
+ Default bool
+ // The type of metric. This is set by the metric registry, and not intended
+ // to be set by a component registering a metric.
+ Type MetricType
+ // Bounds are the bounds of this metric. This only applies to histogram
+ // metrics. If unset or set with length 0, stats handlers will fall back to
+ // default bounds.
+ Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+ MetricTypeIntCount MetricType = iota
+ MetricTypeFloatCount
+ MetricTypeIntHisto
+ MetricTypeFloatHisto
+ MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+ return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+ if registeredMetrics[name] {
+ logger.Fatalf("metric %v already registered", name)
+ }
+ registeredMetrics[name] = true
+ if def {
+ DefaultMetrics = DefaultMetrics.Add(name)
+ }
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+ oldDefaultMetrics := DefaultMetrics
+ oldRegisteredMetrics := registeredMetrics
+ oldMetricsRegistry := metricsRegistry
+
+ registeredMetrics = make(map[Metric]bool)
+ metricsRegistry = make(map[Metric]*MetricDescriptor)
+ maps.Copy(registeredMetrics, oldRegisteredMetrics)
+ maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+ return func() {
+ DefaultMetrics = oldDefaultMetrics
+ registeredMetrics = oldRegisteredMetrics
+ metricsRegistry = oldMetricsRegistry
+ }
+}
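To show how the registry above is consumed, here is a hedged sketch of a component registering a counter at init time and recording on whatever MetricsRecorder it is handed at runtime; the metric name, label, and package are hypothetical.

    package example

    import (
        estats "google.golang.org/grpc/experimental/stats"
    )

    // picksMetric is registered once at package initialization, per the
    // RegisterInt64Count contract; Default is false so it does not alter
    // DefaultMetrics.
    var picksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
        Name:        "example.lb.picks",
        Description: "Number of picks performed by the example balancer.",
        Unit:        "pick",
        Labels:      []string{"grpc.target"},
        Default:     false,
    })

    // recordPick bumps the counter on the supplied recorder, passing the one
    // required label value positionally.
    func recordPick(recorder estats.MetricsRecorder, target string) {
        picksMetric.Record(recorder, 1, target)
    }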
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 0000000000..3221f7a633
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+ // RecordInt64Count records the measurement alongside labels on the int
+ // count associated with the provided handle.
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+ // RecordFloat64Count records the measurement alongside labels on the float
+ // count associated with the provided handle.
+ RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+ // RecordInt64Histo records the measurement alongside labels on the int
+ // histo associated with the provided handle.
+ RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+ // RecordFloat64Histo records the measurement alongside labels on the float
+ // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+ // RecordInt64Gauge records the measurement alongside labels on the int
+ // gauge associated with the provided handle.
+ RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable,
+// however Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+ // metrics are the set of metrics to initialize.
+ metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+ return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ maps.Copy(newMetrics, m.metrics)
+ maps.Copy(newMetrics, metrics.metrics)
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ delete(newMetrics, metric)
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
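The Metrics set is copy-on-write: Add, Join, and Remove each return a new set and leave their receiver untouched. A short illustrative sketch (the metric names are placeholders):

    package example

    import (
        "fmt"

        estats "google.golang.org/grpc/experimental/stats"
    )

    func metricsSets() {
        base := estats.NewMetrics("grpc.client.attempt.started")
        extended := base.Add("example.lb.picks")
        merged := extended.Join(estats.NewMetrics("grpc.client.call.duration"))
        trimmed := merged.Remove("grpc.client.attempt.started")

        // base still has 1 entry; trimmed has 2 ("example.lb.picks" and
        // "grpc.client.call.duration").
        fmt.Println(len(base.Metrics()), len(trimmed.Metrics()))
    }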
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced2..f1ae080dcb 100644
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
import (
"fmt"
-
- "google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.InfoDepth(depth+1, args...)
+ InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.WarningDepth(depth+1, args...)
+ WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.ErrorDepth(depth+1, args...)
+ ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.FatalDepth(depth+1, args...)
+ FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...any) {
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16928c9cb9..db320105e6 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,18 +18,15 @@
// Package grpclog defines logging for grpc.
//
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
import (
"os"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
func init() {
@@ -38,58 +35,58 @@ func init() {
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
- return grpclog.Logger.V(l)
+ return internal.LoggerV2Impl.V(l)
}
// Info logs to the INFO log.
func Info(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...any) {
- grpclog.Logger.Warning(args...)
+ internal.LoggerV2Impl.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...any) {
- grpclog.Logger.Warningf(format, args...)
+ internal.LoggerV2Impl.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...any) {
- grpclog.Logger.Warningln(args...)
+ internal.LoggerV2Impl.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...any) {
- grpclog.Logger.Error(args...)
+ internal.LoggerV2Impl.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...any) {
- grpclog.Logger.Errorf(format, args...)
+ internal.LoggerV2Impl.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...any) {
- grpclog.Logger.Errorln(args...)
+ internal.LoggerV2Impl.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...any) {
- grpclog.Logger.Fatal(args...)
+ internal.LoggerV2Impl.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -97,15 +94,15 @@ func Fatal(args ...any) {
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...any) {
- grpclog.Logger.Fatalf(format, args...)
+ internal.LoggerV2Impl.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calle os.Exit()) with exit code 1.
+// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
- grpclog.Logger.Fatalln(args...)
+ internal.LoggerV2Impl.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -114,19 +111,76 @@ func Fatalln(args ...any) {
//
// Deprecated: use Info.
func Print(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Infoln(args...)
+ }
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Warningln(args...)
+ }
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Errorln(args...)
+ }
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Fatalln(args...)
+ }
+ os.Exit(1)
}
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 0000000000..59c03bc14c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 0000000000..e524fdd40b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+ Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+ // Returns true for all verbose level.
+ return true
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
similarity index 52%
rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index bfc45102ab..07df71e98a 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2020 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,59 +16,17 @@
*
*/
-// Package grpclog (internal) defines depth logging for grpc.
-package grpclog
+package internal
import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
"os"
)
-// Logger is the logger used for the non-depth log functions.
-var Logger LoggerV2
-
-// DepthLogger is the logger used for the depth log functions.
-var DepthLogger DepthLoggerV2
-
-// InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.InfoDepth(depth, args...)
- } else {
- Logger.Infoln(args...)
- }
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.WarningDepth(depth, args...)
- } else {
- Logger.Warningln(args...)
- }
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.ErrorDepth(depth, args...)
- } else {
- Logger.Errorln(args...)
- }
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.FatalDepth(depth, args...)
- } else {
- Logger.Fatalln(args...)
- }
- os.Exit(1)
-}
-
// LoggerV2 does underlying logging work for grpclog.
-// This is a copy of the LoggerV2 defined in the external grpclog package. It
-// is defined here to avoid a circular dependency.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...any)
@@ -107,14 +65,13 @@ type LoggerV2 interface {
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
-// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
-// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
+ LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
@@ -124,3 +81,124 @@ type DepthLoggerV2 interface {
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...any)
}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+ m []*log.Logger
+ v int
+ jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+ sevStr := severityName[severity]
+ if !g.jsonFormat {
+ g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+ return
+ }
+ // TODO: we can also include the logging component, but that needs more
+ // (API) changes.
+ b, _ := json.Marshal(map[string]string{
+ "severity": sevStr,
+ "message": s,
+ })
+ g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) Info(args ...any) {
+ g.output(infoLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Infoln(args ...any) {
+ g.output(infoLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+ g.output(infoLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Warning(args ...any) {
+ g.output(warningLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Warningln(args ...any) {
+ g.output(warningLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+ g.output(warningLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Error(args ...any) {
+ g.output(errorLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Errorln(args ...any) {
+ g.output(errorLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+ g.output(errorLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Fatal(args ...any) {
+ g.output(fatalLog, fmt.Sprint(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+ g.output(fatalLog, fmt.Sprintln(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+ g.output(fatalLog, fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+ return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+ // Verbosity sets the verbosity level of the logger.
+ Verbosity int
+ // FormatJSON controls whether the logger should output logs in JSON format.
+ FormatJSON bool
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+ var m []*log.Logger
+ flag := log.LstdFlags
+ if c.FormatJSON {
+ flag = 0
+ }
+ m = append(m, log.New(infoW, "", flag))
+ m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
+ ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+ m = append(m, log.New(ew, "", flag))
+ m = append(m, log.New(ew, "", flag))
+ return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index b1674d8267..4b20358570 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -18,70 +18,17 @@
package grpclog
-import "google.golang.org/grpc/internal/grpclog"
+import "google.golang.org/grpc/grpclog/internal"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...any)
- Fatalf(format string, args ...any)
- Fatalln(args ...any)
- Print(args ...any)
- Printf(format string, args ...any)
- Println(args ...any)
-}
+type Logger internal.Logger
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
- grpclog.Logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
- Logger
-}
-
-func (g *loggerWrapper) Info(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
- // Returns true for all verbose level.
- return true
+ internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index ecfd36d713..892dc13d16 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -19,52 +19,16 @@
package grpclog
import (
- "encoding/json"
- "fmt"
"io"
- "log"
"os"
"strconv"
"strings"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...any)
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...any)
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...any)
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...any)
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...any)
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...any)
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...any)
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...any)
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...any)
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...any)
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...any)
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...any)
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
+type LoggerV2 internal.LoggerV2
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
@@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
- grpclog.Logger = l
- grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
- jsonFormat bool
+ internal.LoggerV2Impl = l
+ internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -108,32 +46,13 @@ type loggerT struct {
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
-}
-
-type loggerV2Config struct {
- verbose int
- jsonFormat bool
-}
-
-func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
- var m []*log.Logger
- flag := log.LstdFlags
- if c.jsonFormat {
- flag = 0
- }
- m = append(m, log.New(infoW, "", flag))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
- ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, "", flag))
- m = append(m, log.New(ew, "", flag))
- return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
@@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 {
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
- verbose: v,
- jsonFormat: jsonFormat,
- })
-}
-
-func (g *loggerT) output(severity int, s string) {
- sevStr := severityName[severity]
- if !g.jsonFormat {
- g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
- return
- }
- // TODO: we can also include the logging component, but that needs more
- // (API) changes.
- b, _ := json.Marshal(map[string]string{
- "severity": sevStr,
- "message": s,
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+ Verbosity: v,
+ FormatJSON: jsonFormat,
})
- g.m[severity].Output(2, string(b))
-}
-
-func (g *loggerT) Info(args ...any) {
- g.output(infoLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Infoln(args ...any) {
- g.output(infoLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Infof(format string, args ...any) {
- g.output(infoLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Warning(args ...any) {
- g.output(warningLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Warningln(args ...any) {
- g.output(warningLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Warningf(format string, args ...any) {
- g.output(warningLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Error(args ...any) {
- g.output(errorLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Errorln(args ...any) {
- g.output(errorLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Errorf(format string, args ...any) {
- g.output(errorLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Fatal(args ...any) {
- g.output(fatalLog, fmt.Sprint(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalln(args ...any) {
- g.output(fatalLog, fmt.Sprintln(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalf(format string, args ...any) {
- g.output(fatalLog, fmt.Sprintf(format, args...))
- os.Exit(1)
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
@@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool {
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
-type DepthLoggerV2 interface {
- LoggerV2
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
- InfoDepth(depth int, args ...any)
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
- WarningDepth(depth int, args ...any)
- // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
- ErrorDepth(depth int, args ...any)
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
- FatalDepth(depth int, args ...any)
-}
+type DepthLoggerV2 internal.DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 38b8835073..d92335445f 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_health_v1_health_proto_goTypes = []interface{}{
+var file_grpc_health_v1_health_proto_goTypes = []any{
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
@@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckRequest); i {
case 0:
return &v.state
@@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() {
return nil
}
}
- file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckResponse); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index 51b736ba06..f96b8ab492 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.4.0
-// - protoc v4.25.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -32,8 +32,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.62.0 or later.
-const _ = grpc.SupportPackageIsVersion8
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
const (
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
@@ -73,7 +73,7 @@ type HealthClient interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
}
type healthClient struct {
@@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
return out, nil
}
-func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &healthWatchClient{ClientStream: stream}
+ x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts .
return x, nil
}
-type Health_WatchClient interface {
- Recv() (*HealthCheckResponse, error)
- grpc.ClientStream
-}
-
-type healthWatchClient struct {
- grpc.ClientStream
-}
-
-func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
- m := new(HealthCheckResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
// HealthServer is the server API for Health service.
// All implementations should embed UnimplementedHealthServer
-// for forward compatibility
+// for forward compatibility.
//
// Health is gRPC's mechanism for checking whether a server is able to handle
// RPCs. Its semantics are documented in
@@ -160,19 +146,23 @@ type HealthServer interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(*HealthCheckRequest, Health_WatchServer) error
+ Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
}
-// UnimplementedHealthServer should be embedded to have forward compatible implementations.
-type UnimplementedHealthServer struct {
-}
+// UnimplementedHealthServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHealthServer struct{}
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
-func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
+func (UnimplementedHealthServer) testEmbeddedByValue() {}
// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HealthServer will
@@ -182,6 +172,13 @@ type UnsafeHealthServer interface {
}
func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+ // If the following call panics, it indicates UnimplementedHealthServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&Health_ServiceDesc, srv)
}
@@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream})
-}
-
-type Health_WatchServer interface {
- Send(*HealthCheckResponse) error
- grpc.ServerStream
+ return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
}
-type healthWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
- return x.ServerStream.SendMsg(m)
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
// It's only intended for direct use with grpc.RegisterService,
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index aa4505a871..9669328914 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
}
// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
index dfe18b0892..64c791953d 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -46,7 +46,7 @@ type entry interface {
// channelMap is the storage data structure for channelz.
//
-// Methods of channelMap can be divided in two two categories with respect to
+// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods acquire the global lock.
@@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string {
return n
}
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
if maxResults <= 0 {
maxResults = EntriesPerPage
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 03e24e1507..078bb81238 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -33,7 +33,7 @@ var (
// outside this package except by tests.
IDGen IDGenerator
- db *channelMap = newChannelMap()
+ db = newChannelMap()
// EntriesPerPage defines the number of channelz entries to be shown on a web page.
EntriesPerPage = 50
curState int32
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
index d1ed8df6a5..0e6e18e185 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -35,13 +35,13 @@ type SocketOptionData struct {
// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
+func (s *SocketOptionData) Getsockopt(uintptr) {
once.Do(func() {
logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c any) *SocketOptionData {
+func GetSocketOption(any) *SocketOptionData {
return nil
}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index d906487139..452985f8d8 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -45,7 +45,11 @@ var (
// option is present for backward compatibility. This option may be overridden
// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
// or "false".
- EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
+ EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+ // XDSFallbackSupport is the env variable that controls whether support for
+ // xDS fallback is turned on. If this is unset or is false, only the first
+ // xDS server in the list of server configs will be used.
+ XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7f7044e173..7617be2158 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -18,11 +18,11 @@
package internal
var (
- // WithRecvBufferPool is implemented by the grpc package and returns a dial
+ // WithBufferPool is implemented by the grpc package and returns a dial
// option to configure a shared buffer pool for a grpc.ClientConn.
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+ WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
- // RecvBufferPool is implemented by the grpc package and returns a server
+ // BufferPool is implemented by the grpc package and returns a server
// option to configure a shared buffer pool for a grpc.Server.
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+ BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
)
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
index faa998de76..092ad187a2 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -16,17 +16,21 @@
*
*/
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
package grpclog
import (
"fmt"
+
+ "google.golang.org/grpc/grpclog"
)
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
- logger DepthLoggerV2
+ logger grpclog.DepthLoggerV2
prefix string
}
@@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) {
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
- InfoDepth(1, fmt.Sprintf(format, args...))
+ grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
@@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) {
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
- WarningDepth(1, fmt.Sprintf(format, args...))
+ grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
@@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) {
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
- ErrorDepth(1, fmt.Sprintf(format, args...))
-}
-
-// Debugf does info logging at verbose level 2.
-func (pl *PrefixLogger) Debugf(format string, args ...any) {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- if !Logger.V(2) {
- return
- }
- if pl != nil {
- // Handle nil, so the tests can pass in a nil logger.
- format = pl.prefix + format
- pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
- return
- }
- InfoDepth(1, fmt.Sprintf(format, args...))
-
+ grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- return Logger.V(l)
+ if pl != nil {
+ return pl.logger.V(l)
+ }
+ return true
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16ac..19b9d63927 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
return cs
}
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
//
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- return cs.callbacks.Put(f) == nil
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
+ }
}
func (cs *CallbackSerializer) run(ctx context.Context) {
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
index aef8cec1ab..6d8c2f518d 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
if ps.msg != nil {
msg := ps.msg
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[sub] {
@@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
ps.msg = msg
for sub := range ps.subscribers {
s := sub
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[s] {
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 5d66539869..7aae9240ff 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -183,7 +183,7 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
- GRPCResolverSchemeExtraMetadata string = "xds"
+ GRPCResolverSchemeExtraMetadata = "xds"
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
EnterIdleModeForTesting any // func(*grpc.ClientConn)
@@ -203,11 +203,31 @@ var (
// UserSetDefaultScheme is set to true if the user has overridden the
// default resolver scheme.
- UserSetDefaultScheme bool = false
+ UserSetDefaultScheme = false
// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
// is the number of elements. swap swaps the elements with indexes i and j.
ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
+
+ // ConnectedAddress returns the connected address for a SubConnState. The
+ // address is only valid if the state is READY.
+ ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+ // SetConnectedAddress sets the connected address for a SubConnState.
+ SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+ // SnapshotMetricRegistryForTesting snapshots the global data of the metric
+ // registry. Returns a cleanup function that sets the metric registry to its
+ // original state. Only called in testing functions.
+ SnapshotMetricRegistryForTesting func() func()
+
+ // SetDefaultBufferPoolForTesting updates the default buffer pool, for
+ // testing purposes.
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+ // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+ // testing purposes.
+ SetBufferPoolingThresholdForTesting any // func(int)
)
// HealthChecker defines the signature of the client-side LB channel health
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572a..b901c7bace 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 0000000000..fd33af51ae
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+ // TelemetryLabels are the telemetry labels to record.
+ TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+ labels, _ := ctx.Value(labelsKey{}).(*Labels)
+ return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+ // could also append
+ return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000000..be110d41f9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It eats any record calls where the label values provided do not match the
+// number of label keys.
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all the stats
+// handlers provided which implement the MetricsRecorder interface.
+// If no stats handlers provided implement the MetricsRecorder interface,
+// the MetricsRecorder list returned is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index c7dbc82059..757925381f 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
// s.Code() != OK implies that s.Proto() != nil.
p := s.Proto()
for _, detail := range details {
- any, err := anypb.New(protoadapt.MessageV2Of(detail))
+ m, err := anypb.New(protoadapt.MessageV2Of(detail))
if err != nil {
return nil, err
}
- p.Details = append(p.Details, any)
+ p.Details = append(p.Details, m)
}
return &Status{s: p}, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 999f52cd75..54c24c2ff3 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -58,20 +58,20 @@ func GetRusage() *Rusage {
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It is a no-op function for non-linux environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
log()
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
log()
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux environments.
// A negative return value indicates the operation is not supported
-func GetTCPUserTimeout(conn net.Conn) (int, error) {
+func GetTCPUserTimeout(net.Conn) (int, error) {
log()
return -1, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
index 078137b7fd..7e7aaa5463 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
index fd7d43a890..d5c1085eea 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 3deadfb4a2..ef72fbb3a0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -32,6 +32,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -148,9 +149,9 @@ type dataFrame struct {
streamID uint32
endStream bool
h []byte
- d []byte
+ reader mem.Reader
// onEachWrite is called every time
- // a part of d is written out.
+ // a part of data is written out.
onEachWrite func()
}
@@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream {
}
// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
+ wakeupCh chan struct{} // Unblocks readers waiting for something to read.
+ done <-chan struct{} // Closed when the transport is done.
+
+ // Mutex guards all the fields below, except trfChan which can be read
+ // atomically without holding mu.
mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
+ consumerWaiting bool // True when readers are blocked waiting for new data.
+ closed bool // True when the controlbuf is finished.
+ list *itemList // List of queued control frames.
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
@@ -308,47 +313,59 @@ type controlBuffer struct {
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // chan struct{}
+ trfChan atomic.Pointer[chan struct{}]
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
+ wakeupCh: make(chan struct{}, 1),
+ list: &itemList{},
+ done: done,
}
}
-// throttle blocks if there are too many incomingSettings/cleanupStreams in the
-// controlbuf.
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings cleanupStreams etc.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(chan struct{})
- if ch != nil {
+ if ch := c.trfChan.Load(); ch != nil {
select {
- case <-ch:
+ case <-(*ch):
case <-c.done:
}
}
}
+// put adds an item to the controlbuf.
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item could be nil, in which case, this method simply
+// executes f and does not add the item to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
- var wakeUp bool
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return false, ErrConnClosing
}
if f != nil {
if !f() { // f wasn't successful
- c.mu.Unlock()
return false, nil
}
}
+ if it == nil {
+ return true, nil
+ }
+
+ var wakeUp bool
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
@@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
- c.trfChan.Store(make(chan struct{}))
+ ch := make(chan struct{})
+ c.trfChan.Store(&ch)
}
}
- c.mu.Unlock()
if wakeUp {
select {
- case c.ch <- struct{}{}:
+ case c.wakeupCh <- struct{}{}:
default:
}
}
return true, nil
}
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
func (c *controlBuffer) get(block bool) (any, error) {
for {
c.mu.Lock()
- if c.err != nil {
+ frame, err := c.getOnceLocked()
+ if frame != nil || err != nil || !block {
+ // If we read a frame or an error, we can return to the caller. The
+ // call to getOnceLocked() returns a nil frame and a nil error if
+ // there is nothing to read, and in that case, if the caller asked
+ // us not to block, we can return now as well.
c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue().(cbItem)
- if h.isTransportResponseFrame() {
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are removing the frame that put us over the
- // threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(chan struct{})
- close(ch)
- c.trfChan.Store((chan struct{})(nil))
- }
- c.transportResponseFrames--
- }
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
+ return frame, err
}
c.consumerWaiting = true
c.mu.Unlock()
+
+ // Release the lock above and wait to be woken up.
select {
- case <-c.ch:
+ case <-c.wakeupCh:
case <-c.done:
return nil, errors.New("transport closed by client")
}
}
}
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+ if c.closed {
+ return false, ErrConnClosing
+ }
+ if c.list.isEmpty() {
+ return nil, nil
+ }
+ h := c.list.dequeue().(cbItem)
+ if h.isTransportResponseFrame() {
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are removing the frame that put us over the
+ // threshold; close and clear the throttling channel.
+ ch := c.trfChan.Swap(nil)
+ close(*ch)
+ }
+ c.transportResponseFrames--
+ }
+ return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
+ defer c.mu.Unlock()
+
+ if c.closed {
return
}
- c.err = ErrConnClosing
+ c.closed = true
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
+ switch v := head.it.(type) {
+ case *headerFrame:
+ if v.onOrphaned != nil { // It will be nil on the server-side.
+ v.onOrphaned(ErrConnClosing)
+ }
+ case *dataFrame:
+ _ = v.reader.Close()
}
}
+
// In case throttle() is currently in flight, it needs to be unblocked.
// Otherwise, the transport may not close, since the transport is closed by
// the reader encountering the connection error.
- ch, _ := c.trfChan.Load().(chan struct{})
+ ch := c.trfChan.Swap(nil)
if ch != nil {
- close(ch)
+ close(*ch)
}
- c.trfChan.Store((chan struct{})(nil))
- c.mu.Unlock()
}
type side int
@@ -466,7 +487,7 @@ const (
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resemebling to a round-robin scheduling over all streams. While
+// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
@@ -490,12 +511,13 @@ type loopyWriter struct {
draining bool
conn net.Conn
logger *grpclog.PrefixLogger
+ bufferPool mem.BufferPool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
conn: conn,
logger: logger,
ssGoAwayHandler: goAwayHandler,
+ bufferPool: bufferPool,
}
return l
}
@@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// not be established yet.
delete(l.estdStreams, c.streamID)
str.deleteSelf()
+ for head := str.itl.dequeueAll(); head != nil; head = head.next {
+ if df, ok := head.it.(*dataFrame); ok {
+ _ = df.reader.Close()
+ }
+ }
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
@@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
- // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
- // maximum possible HTTP2 frame size.
+ // Every dataFrame has two buffers; h that keeps grpc-message header and data
+ // that is the actual message. As an optimization to keep wire traffic low, data
+ // from data is copied to h to make as big as the maximum possible HTTP2 frame
+ // size.
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
+ _ = dataItem.reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
- var (
- buf []byte
- )
+
// Figure out the maximum size we can send
maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
@@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) {
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
- dSize := min(maxSize-hSize, len(dataItem.d))
- if hSize != 0 {
- if dSize == 0 {
- buf = dataItem.h
- } else {
- // We can add some data to grpc message header to distribute bytes more equally across frames.
- // Copy on the stack to avoid generating garbage
- var localBuf [http2MaxFrameLen]byte
- copy(localBuf[:hSize], dataItem.h)
- copy(localBuf[hSize:], dataItem.d[:dSize])
- buf = localBuf[:hSize+dSize]
- }
+ dSize := min(maxSize-hSize, dataItem.reader.Remaining())
+ remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+ size := hSize + dSize
+
+ var buf *[]byte
+
+ if hSize != 0 && dSize == 0 {
+ buf = &dataItem.h
} else {
- buf = dataItem.d
- }
+ // Note: this is only necessary because the http2.Framer does not support
+ // partially writing a frame, so the sequence must be materialized into a buffer.
+ // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+ pool := l.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ buf = pool.Get(size)
+ defer pool.Put(buf)
- size := hSize + dSize
+ copy((*buf)[:hSize], dataItem.h)
+ _, _ = dataItem.reader.Read((*buf)[hSize:])
+ }
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ if dataItem.endStream && remainingBytes == 0 {
endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+ if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
return false, err
}
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
dataItem.h = dataItem.h[hSize:]
- dataItem.d = dataItem.d[dSize:]
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+ if remainingBytes == 0 { // All the data from that message was written out.
+ _ = dataItem.reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
@@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
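For orientation, an illustrative sketch of the buffer-pool coalescing pattern processData now relies on: borrow a scratch buffer sized for the frame, copy the header and message bytes into it, and return it to the pool afterwards (the function name and sizes are placeholders).

package main

import "google.golang.org/grpc/mem"

// coalesceFrame materializes one HTTP/2 DATA frame payload from the
// grpc-message header and the next chunk of message bytes.
func coalesceFrame(hdr, msg []byte) []byte {
	pool := mem.DefaultBufferPool()
	buf := pool.Get(len(hdr) + len(msg)) // *[]byte with exactly the requested length
	defer pool.Put(buf)                  // hand the scratch buffer back for reuse
	copy((*buf)[:len(hdr)], hdr)
	copy((*buf)[len(hdr):], msg)
	return append([]byte(nil), *buf...) // copy out before the deferred Put
}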
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 4a3ddce29a..ce878693bd 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,7 +24,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -40,6 +39,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
+ bufferPool: bufferPool,
}
st.logger = prefixLoggerForServerHandlerTransport(st)
@@ -171,6 +172,8 @@ type serverHandlerTransport struct {
stats []stats.Handler
logger *grpclog.PrefixLogger
+
+ bufferPool mem.BufferPool
}
func (ht *serverHandlerTransport) Close(err error) {
@@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
@@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
}
- s.hdrMu.Unlock()
})
if err == nil { // transport has not been closed
@@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
s.hdrMu.Unlock()
}
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ // Always take a reference because otherwise there is no guarantee the data will
+ // be available after this function returns. This is what callers to Write
+ // expect.
+ data.Ref()
headersWritten := s.updateHeaderSent()
- return ht.do(func() {
+ err := ht.do(func() {
+ defer data.Free()
if !headersWritten {
ht.writePendingHeaders(s)
}
ht.rw.Write(hdr)
- ht.rw.Write(data)
+ for _, b := range data {
+ _, _ = ht.rw.Write(b.ReadOnlyData())
+ }
ht.rw.(http.Flusher).Flush()
})
+ if err != nil {
+ data.Free()
+ return err
+ }
+ return nil
}
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
@@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
}
@@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
go func() {
defer close(readerDone)
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
+ for {
+ buf := ht.bufferPool.Get(http2MaxFrameLen)
+ n, err := req.Body.Read(*buf)
if n > 0 {
- s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
- buf = buf[n:]
+ *buf = (*buf)[:n]
+ s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+ } else {
+ ht.bufferPool.Put(buf)
}
if err != nil {
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
}
}()
@@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
func (ht *serverHandlerTransport) IncrMsgRecv() {}
-func (ht *serverHandlerTransport) Drain(debugData string) {
+func (ht *serverHandlerTransport) Drain(string) {
panic("Drain() is not implemented")
}
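An illustrative sketch of the read-loop pattern the handler transport now uses: fill a pooled buffer from an io.Reader and transfer ownership to a mem.Buffer, or hand the slice straight back when nothing was read (readSize and the reader are placeholders; the transport itself uses http2MaxFrameLen).

package main

import (
	"io"

	"google.golang.org/grpc/mem"
)

func readIntoPooledBuffers(r io.Reader, pool mem.BufferPool) ([]mem.Buffer, error) {
	const readSize = 16 * 1024 // illustrative chunk size
	var out []mem.Buffer
	for {
		buf := pool.Get(readSize)
		n, err := r.Read(*buf)
		if n > 0 {
			*buf = (*buf)[:n]
			out = append(out, mem.NewBuffer(buf, pool)) // the Buffer now owns *buf
		} else {
			pool.Put(buf) // nothing read; return the slice to the pool directly
		}
		if err != nil {
			if err == io.EOF {
				return out, nil
			}
			return out, err
		}
	}
}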
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 3c63c70698..c769deab53 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -47,6 +47,7 @@ import (
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -59,6 +60,8 @@ import (
// atomically.
var clientConnectionCounter uint64
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// http2Client implements the ClientTransport interface with HTTP2.
@@ -144,7 +147,7 @@ type http2Client struct {
onClose func(GoAwayReason)
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
logger *grpclog.PrefixLogger
@@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}(conn)
- // The following defer and goroutine monitor the connectCtx for cancelation
+ // The following defer and goroutine monitor the connectCtx for cancellation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
@@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
keepaliveEnabled: keepaliveEnabled,
- bufferPool: newBufferPool(),
+ bufferPool: opts.BufferPool,
onClose: onClose,
}
var czSecurity credentials.ChannelzSecurityValue
@@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
if err := t.loopy.run(); !isIOError(err) {
// Immediately close the connection, as the loopy writer returns
// when there are no more active streams and we were draining (the
@@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) {
t.CloseStream(s, err)
},
- freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
hdr := &headerFrame{
hf: headerFields,
endStream: false,
- initStream: func(id uint32) error {
+ initStream: func(uint32) error {
t.mu.Lock()
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
@@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// only once on a transport. Once it is called, the transport should not be
// accessed anymore.
func (t *http2Client) Close(err error) {
+ t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) {
t.kpDormancyCond.Signal()
}
t.mu.Unlock()
+
// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
- // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY.
+ // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+ // also waits for loopyWriter to be closed with a timer to avoid the
+ // long blocking in case the connection is blackholed, i.e. TCP is
+ // just stuck.
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
- <-t.writerDone
+ timer := time.NewTimer(goAwayLoopyWriterTimeout)
+ defer timer.Stop()
+ select {
+ case <-t.writerDone: // success
+ case <-timer.C:
+ t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
+ }
t.cancel()
t.conn.Close()
channelz.RemoveEntry(t.channelz.ID)
@@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
+ reader := data.Reader()
+
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
+ _ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
+ _ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
- d: data,
+ reader: reader,
}
- if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return err
}
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
func (t *http2Client) getStream(f http2.Frame) *Stream {
@@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
// The server has closed the stream without sending trailers. Record that
@@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
- // of this cancelation. Alter the status code accordingly.
+ // of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
@@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
+ t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
return
}
// A client can receive multiple GoAways from the server (see
@@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) {
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
- } else {
- // Transport error.
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
- return
}
+ // Transport error.
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
+ return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
@@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) {
}
}
-func minTime(a, b time.Duration) time.Duration {
- if a < b {
- return a
- }
- return b
-}
-
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
p := &ping{data: [8]byte{}}
@@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, timeoutLeft)
+ sleepDuration := min(t.kp.Time, timeoutLeft)
timeoutLeft -= sleepDuration
timer.Reset(sleepDuration)
case <-t.ctx.Done():
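The Close path above now bounds how long it waits for the loopy writer to flush the GOAWAY; a generic sketch of that bounded-wait pattern (the channel and timeout names are placeholders):

package main

import (
	"log"
	"time"
)

func waitWithTimeout(done <-chan struct{}, timeout time.Duration) {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-done: // the writer goroutine finished flushing
	case <-timer.C:
		log.Printf("writer did not drain within %s; closing the connection anyway", timeout)
	}
}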
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index b7091165b5..584b50fe55 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -39,6 +39,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
@@ -119,7 +120,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelz *channelz.Socket
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
@@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
- bufferPool: newBufferPool(),
+ bufferPool: config.BufferPool,
}
var czSecurity credentials.ChannelzSecurityValue
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
@@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
err := t.loopy.run()
close(t.loopyWriterDone)
if !isIOError(err) {
@@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- freeBuffer: t.bufferPool.put,
+ ctx: s.ctx,
+ ctxDone: s.ctxDone,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
if f.StreamEnded() {
@@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
onWrite: t.setResetPingStrikes,
}
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return t.checkForHeaderListSize(trailingHeader)
+ }, nil)
if !success {
if err != nil {
return err
@@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ reader := data.Reader()
+
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
+ _ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
+ _ = reader.Close()
return t.streamContextErr(s)
}
}
+
df := &dataFrame{
streamID: s.id,
h: hdr,
- d: data,
+ reader: reader,
onEachWrite: t.setResetPingStrikes,
}
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return t.streamContextErr(s)
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
// keepalive running in a separate goroutine does the following:
@@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+ sleepDuration := min(t.kp.Time, kpTimeoutLeft)
kpTimeoutLeft -= sleepDuration
kpTimer.Reset(sleepDuration)
case <-t.done:
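A sketch of the ownership contract the new Write signature implies for callers: take a Reader over the BufferSlice, charge Remaining() bytes against the write quota, and Close the reader on any early return so the buffer references are released (enqueue and sendQuota are placeholders).

package main

import (
	"errors"

	"google.golang.org/grpc/mem"
)

func enqueue(hdr []byte, data mem.BufferSlice, sendQuota int) error {
	reader := data.Reader() // takes a reference on every buffer in the slice
	if len(hdr)+reader.Remaining() > sendQuota {
		_ = reader.Close() // release the references before bailing out
		return errors.New("insufficient write quota")
	}
	// On success, ownership of reader moves to whatever consumes the frame,
	// which closes it once all bytes have been written.
	return nil
}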
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 39cef3bd44..3613d7b648 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
return w
}
-func (w *bufWriter) Write(b []byte) (n int, err error) {
+func (w *bufWriter) Write(b []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
- n, err = w.conn.Write(b)
+ n, err := w.conn.Write(b)
return n, toIOError(err)
}
if w.buf == nil {
b := w.pool.Get().(*[]byte)
w.buf = *b
}
+ written := 0
for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.flushKeepBuffer()
+ copied := copy(w.buf[w.offset:], b)
+ b = b[copied:]
+ written += copied
+ w.offset += copied
+ if w.offset < w.batchSize {
+ continue
+ }
+ if err := w.flushKeepBuffer(); err != nil {
+ return written, err
}
}
- return n, err
+ return written, nil
}
func (w *bufWriter) Flush() error {
@@ -389,7 +393,7 @@ type framer struct {
fr *http2.Framer
}
-var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
+var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 24fa103257..54b2244365 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
-
- return &bufConn{Conn: conn, r: r}, nil
+ // The buffer could contain extra bytes from the target server, so we can't
+ // discard it. However, in many cases where the server waits for the client
+ // to send the first message (e.g. when TLS is being used), the buffer will
+ // be empty, so we can avoid the overhead of reading through this buffer.
+ if r.Buffered() != 0 {
+ return &bufConn{Conn: conn, r: r}, nil
+ }
+ return conn, nil
}
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
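The proxy handshake above keeps the bufio wrapper only when bytes are already buffered; a small sketch of that check using the standard library (bufferedConn stands in for the transport's unexported bufConn):

package main

import (
	"bufio"
	"net"
)

// bufferedConn mirrors the role of the transport's unexported bufConn type.
type bufferedConn struct {
	net.Conn
	r *bufio.Reader
}

func (c *bufferedConn) Read(p []byte) (int, error) { return c.r.Read(p) }

func unwrapIfEmpty(conn net.Conn, r *bufio.Reader) net.Conn {
	if r.Buffered() != 0 {
		// Unread bytes from the target server must not be dropped.
		return &bufferedConn{Conn: conn, r: r}
	}
	return conn // nothing buffered: skip the extra copy on every Read
}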
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 4b39c0ade9..fdd6fa86cc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,7 +22,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -37,6 +36,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -47,32 +47,10 @@ import (
const logLevel = 2
-type bufferPool struct {
- pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
- return &bufferPool{
- pool: sync.Pool{
- New: func() any {
- return new(bytes.Buffer)
- },
- },
- }
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
- return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
- p.pool.Put(b)
-}
-
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
- buffer *bytes.Buffer
+ buffer mem.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
if b.err != nil {
+ // drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+ // will always return an error, making this buffer inaccessible.
+ r.buffer.Free()
b.mu.Unlock()
// An error had occurred earlier, don't accept more
// data or errors.
@@ -148,45 +129,97 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
- last *bytes.Buffer // Stores the remaining data in the previous calls.
+ last mem.Buffer // Stores the remaining data in the previous calls.
err error
- freeBuffer func(*bytes.Buffer)
}
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil {
- // Read remaining data left in last call.
- copied, _ := r.last.Read(p)
- if r.last.Len() == 0 {
- r.freeBuffer(r.last)
+ n, r.last = mem.ReadUnsafe(header, r.last)
+ return n, nil
+ }
+ if r.closeStream != nil {
+ n, r.err = r.readHeaderClient(header)
+ } else {
+ n, r.err = r.readHeader(header)
+ }
+ return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there no additional data available in
+// recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.last != nil {
+ buf = r.last
+ if r.last.Len() > n {
+ buf, r.last = mem.SplitUnsafe(buf, n)
+ } else {
r.last = nil
}
- return copied, nil
+ return buf, nil
}
if r.closeStream != nil {
- n, r.err = r.readClient(p)
+ buf, r.err = r.readClient(n)
} else {
- n, r.err = r.read(p)
+ buf, r.err = r.read(n)
}
- return n, r.err
+ return buf, r.err
}
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readHeaderAdditional(m, header)
+ }
+}
+
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+ select {
+ case <-r.ctxDone:
+ return nil, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+ // If the context is canceled, then closes the stream with nil metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark stream as done, thus fix the race.
+ //
+ // TODO: delaying ctx error seems like a unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readHeaderAdditional(m, header)
+ case m := <-r.recv.get():
+ return r.readHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
@@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
- return r.readAdditional(m, p)
+ return r.readAdditional(m, n)
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readAdditional(m, n)
}
}
-func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
return 0, m.err
}
- copied, _ := m.buffer.Read(p)
- if m.buffer.Len() == 0 {
- r.freeBuffer(m.buffer)
- r.last = nil
- } else {
- r.last = m.buffer
+
+ n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+ return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+ r.recv.load()
+ if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
+ return nil, m.err
+ }
+
+ if m.buffer.Len() > n {
+ m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
}
- return copied, nil
+
+ return m.buffer, nil
}
type streamState uint32
@@ -241,7 +289,7 @@ const (
type Stream struct {
id uint32
st ServerTransport // nil for client side Stream
- ct *http2Client // nil for server side Stream
+ ct ClientTransport // nil for server side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
@@ -251,7 +299,7 @@ type Stream struct {
recvCompress string
sendCompress string
buf *recvBuffer
- trReader io.Reader
+ trReader *transportReader
fc *inFlow
wq *writeQuota
@@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool {
return s.noHeaders
}
-// Trailer returns the cached trailer metedata. Note that if it is not called
+// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
// It can be safely read only after stream has ended that is either read
@@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) {
s.buf.put(m)
}
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
+func (s *Stream) ReadHeader(header []byte) (err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.er; er != nil {
+ return er
+ }
+ s.requestRead(len(header))
+ for len(header) != 0 {
+ n, err := s.trReader.ReadHeader(header)
+ header = header[n:]
+ if len(header) == 0 {
+ err = nil
+ }
+ if err != nil {
+ if n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Read reads n bytes from the wire for this stream.
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
// Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
+ if er := s.trReader.er; er != nil {
+ return nil, er
}
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
+ s.requestRead(n)
+ for n != 0 {
+ buf, err := s.trReader.Read(n)
+ var bufLen int
+ if buf != nil {
+ bufLen = buf.Len()
+ }
+ n -= bufLen
+ if n == 0 {
+ err = nil
+ }
+ if err != nil {
+ if bufLen > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ data.Free()
+ return nil, err
+ }
+ data = append(data, buf)
+ }
+ return data, nil
}
-// tranportReader reads all the data available for this Stream from the transport and
+// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader io.Reader
+ reader *recvBufferReader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+ n, err := t.reader.ReadHeader(header)
if err != nil {
t.er = err
- return
+ return 0, err
+ }
+ t.windowHandler(len(header))
+ return n, nil
+}
+
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+ buf, err := t.reader.Read(n)
+ if err != nil {
+ t.er = err
+ return buf, err
}
- t.windowHandler(n)
- return
+ t.windowHandler(buf.Len())
+ return buf, nil
}
// BytesReceived indicates whether any bytes have been received on this stream.
@@ -574,6 +673,7 @@ type ServerConfig struct {
ChannelzParent *channelz.Server
MaxHeaderListSize *uint32
HeaderTableSize *uint32
+ BufferPool mem.BufferPool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -612,6 +712,8 @@ type ConnectOptions struct {
MaxHeaderListSize *uint32
// UseProxy specifies if a proxy should be used.
UseProxy bool
+ // The mem.BufferPool to use when reading/writing to the wire.
+ BufferPool mem.BufferPool
}
// NewClientTransport establishes the transport with the required ConnectOptions
@@ -673,7 +775,7 @@ type ClientTransport interface {
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// NewStream creates a Stream for an RPC.
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
@@ -725,7 +827,7 @@ type ServerTransport interface {
// Write sends the data for the given stream.
// Write may not be called on all streams.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// WriteStatus sends the status of a stream to the client. WriteStatus is
// the final call made on a stream and always occurs.
@@ -798,7 +900,7 @@ var (
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indiacte application
+ // errStreamDone is returned from write at the client side to indicate application
// layer of an error.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this
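A sketch of the consumer-side contract for the reworked Stream read API: the fixed-size gRPC message header is read into a plain slice with ReadHeader, the payload is read as a mem.BufferSlice, and the slice is freed once its bytes have been copied out (streamReader and recvMessage are placeholders that mirror the new method shapes).

package main

import (
	"encoding/binary"

	"google.golang.org/grpc/mem"
)

// streamReader captures the two read methods the transport Stream now exposes.
type streamReader interface {
	ReadHeader(header []byte) error
	Read(n int) (mem.BufferSlice, error)
}

func recvMessage(s streamReader) ([]byte, error) {
	hdr := make([]byte, 5) // 1-byte compression flag + 4-byte length
	if err := s.ReadHeader(hdr); err != nil {
		return nil, err
	}
	length := int(binary.BigEndian.Uint32(hdr[1:]))
	data, err := s.Read(length)
	if err != nil {
		return nil, err
	}
	defer data.Free()              // release buffer references once decoded
	return data.Materialize(), nil // copy into one contiguous []byte
}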
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 34d31b5e7d..eb42b19fb9 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -34,15 +34,29 @@ type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
// If set below 10s, a minimum value of 10s will be used instead.
- Time time.Duration // The current default value is infinity.
+ //
+ // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+ // minutes (which means the client shouldn't ping more frequently than every
+ // 5 minutes).
+ //
+ // Though not ideal, it's not a strong requirement for Time to be less than
+ // EnforcementPolicy.MinTime. Time will automatically double if the server
+ // disconnects due to its enforcement policy.
+ //
+ // For more details, see
+ // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+ Time time.Duration
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
- Timeout time.Duration // The current default value is 20 seconds.
+ //
+ // If keepalive is enabled, and this value is not explicitly set, the default
+ // is 20 seconds.
+ Timeout time.Duration
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
- PermitWithoutStream bool // false by default.
+ PermitWithoutStream bool
}
// ServerParameters is used to set keepalive and max-age parameters on the
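For reference, a typical client-side keepalive configuration consistent with the documented defaults above (the values and target are placeholders):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func dial(target string) (*grpc.ClientConn, error) {
	kp := keepalive.ClientParameters{
		Time:                5 * time.Minute,  // no more frequent than the server's default MinTime
		Timeout:             20 * time.Second, // matches the documented default
		PermitWithoutStream: false,
	}
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kp),
	)
}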
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 0000000000..c37c58c023
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "sort"
+ "sync"
+
+ "google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+ // Get returns a buffer with specified length from the pool.
+ Get(length int) *[]byte
+
+ // Put returns a buffer to the pool.
+ Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+ 256,
+ 4 << 10, // 4KB (go page size)
+ 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+ 32 << 10, // 32KB (default buffer size for io.Copy)
+ 1 << 20, // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+ defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+ internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+ defaultBufferPool = pool
+ }
+
+ internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+ bufferPoolingThreshold = threshold
+ }
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewBufferPool that uses a set of default sizes optimized for
+// expected workflows.
+func DefaultBufferPool() BufferPool {
+ return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+ sort.Ints(poolSizes)
+ pools := make([]*sizedBufferPool, len(poolSizes))
+ for i, s := range poolSizes {
+ pools[i] = newSizedBufferPool(s)
+ }
+ return &tieredBufferPool{
+ sizedPools: pools,
+ }
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+ sizedPools []*sizedBufferPool
+ fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+ return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+ p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+ poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+ return p.sizedPools[i].defaultSize >= size
+ })
+
+ if poolIdx == len(p.sizedPools) {
+ return &p.fallbackPool
+ }
+
+ return p.sizedPools[poolIdx]
+}
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16kb and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16kb. Note that however it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+ pool sync.Pool
+ defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+ buf := p.pool.Get().(*[]byte)
+ b := *buf
+ clear(b[:cap(b)])
+ *buf = b[:size]
+ return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+ if cap(*buf) < p.defaultSize {
+ // Ignore buffers that are too small to fit in the pool. Otherwise, when
+ // Get is called it will panic as it tries to index outside the bounds
+ // of the buffer.
+ return
+ }
+ p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+ return &sizedBufferPool{
+ pool: sync.Pool{
+ New: func() any {
+ buf := make([]byte, size)
+ return &buf
+ },
+ },
+ defaultSize: size,
+ }
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+ pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+ bs, ok := p.pool.Get().(*[]byte)
+ if ok && cap(*bs) >= size {
+ *bs = (*bs)[:size]
+ return bs
+ }
+
+ // A buffer was pulled from the pool, but it is too small. Put it back in
+ // the pool and create one large enough.
+ if ok {
+ p.pool.Put(bs)
+ }
+
+ b := make([]byte, size)
+ return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+ p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+ b := make([]byte, length)
+ return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
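A small usage sketch for the BufferPool API defined above (tier and request sizes are arbitrary):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.NewTieredBufferPool(4<<10, 16<<10) // 4KB and 16KB tiers
	buf := pool.Get(10 << 10)                      // served by the 16KB tier
	fmt.Println(len(*buf), cap(*buf))              // 10240 16384
	pool.Put(buf)                                  // return the buffer for reuse

	// NopBufferPool always allocates and never reuses; handy in tests.
	var nop mem.BufferPool = mem.NopBufferPool{}
	small := nop.Get(64)
	nop.Put(small)
}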
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 0000000000..228e9c2f20
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "io"
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers such as Ref,
+// Len or CopyTo etc., will panic if any underlying buffers have already been
+// freed. It is recommended to not directly interact with any of the underlying
+// buffers directly, rather such interactions should be mediated through the
+// various methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+ var length int
+ for _, b := range s {
+ length += b.Len()
+ }
+ return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+ for _, b := range s {
+ b.Ref()
+ }
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+ for _, b := range s {
+ b.Free()
+ }
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+ off := 0
+ for _, b := range s {
+ off += copy(dst[off:], b.ReadOnlyData())
+ }
+ return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+ l := s.Len()
+ if l == 0 {
+ return nil
+ }
+ out := make([]byte, l)
+ s.CopyTo(out)
+ return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+ if len(s) == 1 {
+ s[0].Ref()
+ return s[0]
+ }
+ sLen := s.Len()
+ if sLen == 0 {
+ return emptyBuffer{}
+ }
+ buf := pool.Get(sLen)
+ s.CopyTo(*buf)
+ return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+ s.Ref()
+ return &sliceReader{
+ data: s,
+ len: s.Len(),
+ }
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts systems. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+ // Close frees the underlying BufferSlice and never returns an error. Subsequent
+ // calls to Read will return (0, io.EOF).
+ Close() error
+ // Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int
+}
+
+type sliceReader struct {
+ data BufferSlice
+ len int
+ // The index into data[0].ReadOnlyData().
+ bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+ return r.len
+}
+
+func (r *sliceReader) Close() error {
+ r.data.Free()
+ r.data = nil
+ r.len = 0
+ return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+ if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+ return false
+ }
+
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ for len(buf) != 0 && r.len != 0 {
+ // Copy as much as possible from the first Buffer in the slice into the
+ // given byte slice.
+ data := r.data[0].ReadOnlyData()
+ copied := copy(buf, data[r.bufferIdx:])
+ r.len -= copied // Reduce len by the number of bytes copied.
+ r.bufferIdx += copied // Increment the buffer index.
+ n += copied // Increment the total number of bytes read.
+ buf = buf[copied:] // Shrink the given byte slice.
+
+ // If we have copied all the data from the first Buffer, free it and advance to
+ // the next in the slice.
+ r.freeFirstBufferIfEmpty()
+ }
+
+ return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ // There may be any number of empty buffers in the slice, clear them all until a
+ // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+ for r.freeFirstBufferIfEmpty() {
+ }
+
+ b := r.data[0].ReadOnlyData()[r.bufferIdx]
+ r.len--
+ r.bufferIdx++
+ // Free the first buffer in the slice if the last byte was read
+ r.freeFirstBufferIfEmpty()
+ return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+ buffers *BufferSlice
+ pool BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+ b := Copy(p, w.pool)
+ *w.buffers = append(*w.buffers, b)
+ return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is added to
+// the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+ return &writer{buffers: buffers, pool: pool}
+}
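A sketch of round-tripping data through the BufferSlice helpers defined above:

package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	var slice mem.BufferSlice
	w := mem.NewWriter(&slice, pool) // each Write appends one pooled Buffer
	_, _ = io.WriteString(w, "hello ")
	_, _ = io.WriteString(w, "world")

	fmt.Println(slice.Len()) // 11: total bytes across both buffers

	r := slice.Reader() // takes its own references on every buffer
	out, _ := io.ReadAll(r)
	_ = r.Close() // releases the reader's references
	fmt.Println(string(out))

	slice.Free() // release the slice's original references
}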
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 0000000000..4d66b2ccc2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,252 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+ // ReadOnlyData returns the underlying byte slice. Note that it is undefined
+ // behavior to modify the contents of this slice in any way.
+ ReadOnlyData() []byte
+ // Ref increases the reference counter for this Buffer.
+ Ref()
+ // Free decrements this Buffer's reference counter and frees the underlying
+ // byte slice if the counter reaches 0 as a result of this call.
+ Free()
+ // Len returns the Buffer's size.
+ Len() int
+
+ split(n int) (left, right Buffer)
+ read(buf []byte) (int, Buffer)
+}
+
+var (
+ bufferPoolingThreshold = 1 << 10
+
+ bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+ refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+func IsBelowBufferPoolingThreshold(size int) bool {
+ return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+ origData *[]byte
+ data []byte
+ refs *atomic.Int32
+ pool BufferPool
+}
+
+func newBuffer() *buffer {
+ return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+ if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) {
+ return (SliceBuffer)(*data)
+ }
+ b := newBuffer()
+ b.origData = data
+ b.data = *data
+ b.pool = pool
+ b.refs = refObjectPool.Get().(*atomic.Int32)
+ b.refs.Add(1)
+ return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+ if IsBelowBufferPoolingThreshold(len(data)) {
+ buf := make(SliceBuffer, len(data))
+ copy(buf, data)
+ return buf
+ }
+
+ buf := pool.Get(len(data))
+ copy(*buf, data)
+ return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+ return b.data
+}
+
+func (b *buffer) Ref() {
+ if b.refs == nil {
+ panic("Cannot ref freed buffer")
+ }
+ b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+ if b.refs == nil {
+ panic("Cannot free freed buffer")
+ }
+
+ refs := b.refs.Add(-1)
+ switch {
+ case refs > 0:
+ return
+ case refs == 0:
+ if b.pool != nil {
+ b.pool.Put(b.origData)
+ }
+
+ refObjectPool.Put(b.refs)
+ b.origData = nil
+ b.data = nil
+ b.refs = nil
+ b.pool = nil
+ bufferObjectPool.Put(b)
+ default:
+ panic("Cannot free freed buffer")
+ }
+}
+
+func (b *buffer) Len() int {
+ return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+ if b.refs == nil {
+ panic("Cannot split freed buffer")
+ }
+
+ b.refs.Add(1)
+ split := newBuffer()
+ split.origData = b.origData
+ split.data = b.data[n:]
+ split.refs = b.refs
+ split.pool = b.pool
+
+ b.data = b.data[:n]
+
+ return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+
+ n := copy(buf, b.data)
+ if n == len(b.data) {
+ b.Free()
+ return n, nil
+ }
+
+ b.data = b.data[n:]
+ return n, b
+}
+
+// String returns a string representation of the buffer. May be used for
+// debugging purposes.
+func (b *buffer) String() string {
+ return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+ return buf.read(dst)
+}
+
+// SplitUnsafe modifies the receiver to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer functions
+// just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+ return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+ return nil
+}
+
+func (e emptyBuffer) Ref() {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+ return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+ return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+ return 0, e
+}
+
+type SliceBuffer []byte
+
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+func (s SliceBuffer) Ref() {}
+func (s SliceBuffer) Free() {}
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+ return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+ n := copy(buf, s)
+ if n == len(s) {
+ return n, nil
+ }
+ return n, s[n:]
+}
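A sketch of the reference-counting contract for mem.Buffer described above (the payload size is arbitrary, chosen to sit above the pooling threshold):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	payload := make([]byte, 2048) // above the 1KB pooling threshold, so the copy is pooled
	buf := mem.Copy(payload, pool)

	buf.Ref() // a second logical owner takes its own reference
	fmt.Println(buf.Len(), len(buf.ReadOnlyData())) // 2048 2048

	buf.Free() // first owner is done; the data is still valid
	buf.Free() // last reference released; the []byte returns to the pool
}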
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 1e9485fd6e..d2e15253bb 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
// manner.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(mdIncomingKey{}).(MD)
if !ok {
@@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
- // Case insenitive comparison: MD is a map, and there's no guarantee
+ // Case insensitive comparison: MD is a map, and there's no guarantee
// that the MD attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {
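Typical server-side usage of the API above, which is no longer marked experimental (the header key is a placeholder):

package main

import (
	"context"

	"google.golang.org/grpc/metadata"
)

func tenantFromContext(ctx context.Context) string {
	// Keys are matched case-insensitively, so "X-Tenant-Id" works as well.
	if vals := metadata.ValueFromIncomingContext(ctx, "x-tenant-id"); len(vals) > 0 {
		return vals[0]
	}
	return ""
}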
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 73bd633643..e87a17f36a 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -20,6 +20,7 @@ package grpc
import (
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -31,9 +32,10 @@ import (
// later release.
type PreparedMsg struct {
// Struct for preparing msg before sending them
- encodedData []byte
+ encodedData mem.BufferSlice
hdr []byte
- payload []byte
+ payload mem.BufferSlice
+ pf payloadFormat
}
// Encode marshalls and compresses the message using the codec and compressor for the stream.
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
if err != nil {
return err
}
- p.encodedData = data
- compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+ materializedData := data.Materialize()
+ data.Free()
+ p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+
+ // TODO: it should be possible to grab the bufferPool from the underlying
+ // stream implementation with a type cast to its actual type (such as
+ // addrConnStream) and accessing the buffer pool directly.
+ var compData mem.BufferSlice
+ compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
if err != nil {
return err
}
- p.hdr, p.payload = msgHeader(data, compData)
+
+ if p.pf.isCompressed() {
+ materializedCompData := compData.Materialize()
+ compData.Free()
+ compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+ }
+
+ p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
return nil
}
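
Note (not part of the patch): PreparedMsg.Encode now stores the encoded and compressed payloads as mem.BufferSlice values and records the payload format, but the caller-facing flow should be unchanged. A sketch of the usual server-streaming usage; the generated stream type pb.Echo_StreamServer, the pb.Request type, and the s.pending helper are hypothetical names.

    func (s *server) Stream(req *pb.Request, stream pb.Echo_StreamServer) error {
        for _, msg := range s.pending(req) {
            // Serialize (and, if configured, compress) the message once, up front.
            pm := &grpc.PreparedMsg{}
            if err := pm.Encode(stream, msg); err != nil {
                return err
            }
            // Per the existing PreparedMsg API, SendMsg accepts a *PreparedMsg
            // and reuses the prepared payload instead of marshalling msg again.
            if err := stream.SendMsg(pm); err != nil {
                return err
            }
        }
        return nil
    }
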
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
deleted file mode 100644
index 3edca296c2..0000000000
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-# Copyright 2020 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eu -o pipefail
-
-WORKDIR=$(mktemp -d)
-
-function finish {
- rm -rf "$WORKDIR"
-}
-trap finish EXIT
-
-export GOBIN=${WORKDIR}/bin
-export PATH=${GOBIN}:${PATH}
-mkdir -p ${GOBIN}
-
-echo "remove existing generated files"
-# grpc_testing_not_regenerate/*.pb.go is not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
-
-echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
-(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
-
-echo "go install cmd/protoc-gen-go-grpc"
-(cd cmd/protoc-gen-go-grpc && go install .)
-
-echo "git clone https://github.com/grpc/grpc-proto"
-git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
-
-echo "git clone https://github.com/protocolbuffers/protobuf"
-git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
-
-# Pull in code.proto as a proto dependency
-mkdir -p ${WORKDIR}/googleapis/google/rpc
-echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
-curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
-
-mkdir -p ${WORKDIR}/out
-
-# Generates sources without the embed requirement
-LEGACY_SOURCES=(
- ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
- ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
- ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
- ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
- profiling/proto/service.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
- ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
-)
-
-# Generates only the new gRPC Service symbols
-SOURCES=(
- $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
- ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
- ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
- ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
- ${WORKDIR}/grpc-proto/grpc/testing/*.proto
- ${WORKDIR}/grpc-proto/grpc/core/*.proto
-)
-
-# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
-# import path of 'bar' in the generated code when 'foo.proto' is imported in
-# one of the sources.
-#
-# Note that the protos listed here are all for testing purposes. All protos to
-# be used externally should have a go_package option (and they don't need to be
-# listed here).
-OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
-Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
-Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
-
-for src in ${SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-for src in ${LEGACY_SOURCES[@]}; do
- echo "protoc ${src}"
- protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
- -I"." \
- -I${WORKDIR}/grpc-proto \
- -I${WORKDIR}/googleapis \
- -I${WORKDIR}/protobuf/src \
- ${src}
-done
-
-# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
-# current location. Move it into the right place.
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
-
-# grpc_testing_not_regenerate/*.pb.go are not re-generated,
-# see grpc_testing_not_regenerate/README.md for details.
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go
-
-cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index c5fb45236f..23bb3fb258 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
// any newly created ccResolverWrapper, except that close may be called instead.
func (ccr *ccResolverWrapper) start() error {
errCh := make(chan error)
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil {
return
}
@@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error {
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.serializer.Schedule(func(ctx context.Context) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccr.resolver == nil {
return
}
@@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() {
ccr.closed = true
ccr.mu.Unlock()
- ccr.serializer.Schedule(func(context.Context) {
+ ccr.serializer.TrySchedule(func(context.Context) {
if ccr.resolver == nil {
return
}
@@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P
// addChannelzTraceEvent adds a channelz trace event containing the new
// state received from resolver implementations.
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+ if !logger.V(0) && !channelz.IsOn() {
+ return
+ }
var updates []string
var oldSC, newSC *ServiceConfig
var oldOK, newOK bool
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index fdd49e6e91..2d96f1405e 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -19,7 +19,6 @@
package grpc
import (
- "bytes"
"compress/gzip"
"context"
"encoding/binary"
@@ -35,6 +34,7 @@ import (
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -220,8 +220,8 @@ type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.HeaderAddr, _ = attempt.s.Header()
}
@@ -242,8 +242,8 @@ type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.TrailerAddr = attempt.s.Trailer()
}
@@ -264,24 +264,20 @@ type PeerCallOption struct {
PeerAddr *peer.Peer
}
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
if x, ok := peer.FromContext(attempt.s.Context()); ok {
*o.PeerAddr = *x
}
}
-// WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false and the
-// connection is in the TRANSIENT_FAILURE state, the RPC will fail
-// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error. gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data. Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
+// waitForReady is false, the RPC will fail immediately. Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
//
-// By default, RPCs don't "wait for ready".
+// By default, RPCs do not "wait for ready".
func WaitForReady(waitForReady bool) CallOption {
return FailFastCallOption{FailFast: !waitForReady}
}
@@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
c.failFast = o.FailFast
return nil
}
-func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
// OnFinish returns a CallOption that configures a callback to be called when
// the call completes. The error passed to the callback is the status of the
@@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error {
return nil
}
-func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive. If this is not set, gRPC uses the default
@@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send. If this is not set, gRPC uses the default
@@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
c.creds = o.Creds
return nil
}
-func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
@@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error {
c.compressorType = o.CompressorType
return nil
}
-func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
c.contentSubtype = o.ContentSubtype
return nil
}
-func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
// ForceCodec returns a CallOption that will set codec to be used for all
// request and response messages for a call. The result of calling Name() will
@@ -515,10 +511,50 @@ type ForceCodecCallOption struct {
}
func (o ForceCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV1Bridge(o.Codec)
return nil
}
-func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodecV2 returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodecV2(codec encoding.CodecV2) CallOption {
+ return ForceCodecV2CallOption{CodecV2: codec}
+}
+
+// ForceCodecV2CallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecV2CallOption struct {
+ CodecV2 encoding.CodecV2
+}
+
+func (o ForceCodecV2CallOption) before(c *callInfo) error {
+ c.codec = o.CodecV2
+ return nil
+}
+
+func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@@ -540,10 +576,10 @@ type CustomCodecCallOption struct {
}
func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV0Bridge(o.Codec)
return nil
}
-func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
@@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
@@ -581,19 +617,28 @@ const (
compressionMade payloadFormat = 1 // compressed
)
+func (pf payloadFormat) isCompressed() bool {
+ return pf == compressionMade
+}
+
+type streamReader interface {
+ ReadHeader(header []byte) error
+ Read(n int) (mem.BufferSlice, error)
+}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
- r io.Reader
+ r streamReader
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
- // recvBufferPool is the pool of shared receive buffers.
- recvBufferPool SharedBufferPool
+ // bufferPool is the pool of shared receive buffers.
+ bufferPool mem.BufferPool
}
// recvMsg reads a complete gRPC message from the stream.
@@ -608,14 +653,15 @@ type parser struct {
// - an error from the status package
//
// No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
+// that the underlying streamReader must not return an incompatible
// error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
- if _, err := p.r.Read(p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+ err := p.r.ReadHeader(p.header[:])
+ if err != nil {
return 0, nil, err
}
- pf = payloadFormat(p.header[0])
+ pf := payloadFormat(p.header[0])
length := binary.BigEndian.Uint32(p.header[1:])
if length == 0 {
@@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
- msg = p.recvBufferPool.Get(int(length))
- if _, err := p.r.Read(msg); err != nil {
+
+ data, err := p.r.Read(int(length))
+ if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, nil, err
}
- return pf, msg, nil
+ return pf, data, nil
}
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
-func encode(c baseCodec, msg any) ([]byte, error) {
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}
@@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
- if uint(len(b)) > math.MaxUint32 {
+ if uint(b.Len()) > math.MaxUint32 {
+ b.Free()
return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
}
return b, nil
@@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) {
// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
- if compressor == nil && cp == nil {
- return nil, nil
- }
- if len(in) == 0 {
- return nil, nil
+func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
+ if (compressor == nil && cp == nil) || in.Len() == 0 {
+ return nil, compressionNone, nil
}
+ var out mem.BufferSlice
+ w := mem.NewWriter(&out, pool)
wrapErr := func(err error) error {
+ out.Free()
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
- cbuf := &bytes.Buffer{}
if compressor != nil {
- z, err := compressor.Compress(cbuf)
+ z, err := compressor.Compress(w)
if err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
- if _, err := z.Write(in); err != nil {
- return nil, wrapErr(err)
+ for _, b := range in {
+ if _, err := z.Write(b.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
+ }
}
if err := z.Close(); err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
} else {
- if err := cp.Do(cbuf, in); err != nil {
- return nil, wrapErr(err)
+ // This is obviously really inefficient since it fully materializes the data, but
+ // there is no way around this with the old Compressor API. At least it attempts
+ // to return the buffer to the provider, in the hopes it can be reused (maybe
+ // even by a subsequent call to this very function).
+ buf := in.MaterializeToBuffer(pool)
+ defer buf.Free()
+ if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
}
}
- return cbuf.Bytes(), nil
+ return out, compressionMade, nil
}
const (
@@ -697,33 +752,36 @@ const (
// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
hdr = make([]byte, headerLen)
- if compData != nil {
- hdr[0] = byte(compressionMade)
- data = compData
+ hdr[0] = byte(pf)
+
+ var length uint32
+ if pf.isCompressed() {
+ length = uint32(compData.Len())
+ payload = compData
} else {
- hdr[0] = byte(compressionNone)
+ length = uint32(data.Len())
+ payload = data
}
// Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
- return hdr, data
+ binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+ return hdr, payload
}
-func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
Client: client,
Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- CompressedLength: len(payload),
+ Length: dataLength,
+ WireLength: payloadLength + headerLen,
+ CompressedLength: payloadLength,
SentTime: t,
}
}
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
@@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
}
if !haveCompressor {
- return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ if isServer {
+ return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ } else {
+ return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ }
}
default:
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
type payloadInfo struct {
compressedLength int // The compressed length got from wire.
- uncompressedBytes []byte
+ uncompressedBytes mem.BufferSlice
+}
+
+func (p *payloadInfo) free() {
+ if p != nil && p.uncompressedBytes != nil {
+ p.uncompressedBytes.Free()
+ }
}
// recvAndDecompress reads a message from the stream, decompressing it if necessary.
//
// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as
// the buffer is no longer needed.
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor,
-) (uncompressedBuf []byte, cancel func(), err error) {
- pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize)
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+) (out mem.BufferSlice, err error) {
+ pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
- return nil, nil, st.Err()
+ compressedLength := compressed.Len()
+
+ if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
+ compressed.Free()
+ return nil, st.Err()
}
var size int
- if pf == compressionMade {
+ if pf.isCompressed() {
+ defer compressed.Free()
+
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
- uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf))
+ var uncompressedBuf []byte
+ uncompressedBuf, err = dc.Do(compressed.Reader())
+ if err == nil {
+ out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)}
+ }
size = len(uncompressedBuf)
} else {
- uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize)
+ out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool)
}
if err != nil {
- return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
if size > maxReceiveMessageSize {
+ out.Free()
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
- return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
}
} else {
- uncompressedBuf = compressedBuf
+ out = compressed
}
if payInfo != nil {
- payInfo.compressedLength = len(compressedBuf)
- payInfo.uncompressedBytes = uncompressedBuf
-
- cancel = func() {}
- } else {
- cancel = func() {
- p.recvBufferPool.Put(&compressedBuf)
- }
+ payInfo.compressedLength = compressedLength
+ out.Ref()
+ payInfo.uncompressedBytes = out
}
- return uncompressedBuf, cancel, nil
+ return out, nil
}
// Using compressor, decompress d, returning data and size.
// Optionally, if data will be over maxReceiveMessageSize, just return the size.
-func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
- dcReader, err := compressor.Decompress(bytes.NewReader(d))
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
+ dcReader, err := compressor.Decompress(d.Reader())
if err != nil {
return nil, 0, err
}
- if sizer, ok := compressor.(interface {
- DecompressedSize(compressedBytes []byte) int
- }); ok {
- if size := sizer.DecompressedSize(d); size >= 0 {
- if size > maxReceiveMessageSize {
- return nil, size, nil
- }
- // size is used as an estimate to size the buffer, but we
- // will read more data if available.
- // +MinRead so ReadFrom will not reallocate if size is correct.
- //
- // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
- // we can also utilize the recv buffer pool here.
- buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
- bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return buf.Bytes(), int(bytesRead), err
- }
+
+ // TODO: Can/should this still be preserved with the new BufferSlice API? Are
+ // there any actual benefits to allocating a single large buffer instead of
+ // multiple smaller ones?
+ //if sizer, ok := compressor.(interface {
+ // DecompressedSize(compressedBytes []byte) int
+ //}); ok {
+ // if size := sizer.DecompressedSize(d); size >= 0 {
+ // if size > maxReceiveMessageSize {
+ // return nil, size, nil
+ // }
+ // // size is used as an estimate to size the buffer, but we
+ // // will read more data if available.
+ // // +MinRead so ReadFrom will not reallocate if size is correct.
+ // //
+ // // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
+ // // we can also utilize the recv buffer pool here.
+ // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+ // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ // return buf.Bytes(), int(bytesRead), err
+ // }
+ //}
+
+ var out mem.BufferSlice
+ _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ if err != nil {
+ out.Free()
+ return nil, 0, err
}
- // Read from LimitReader with limit max+1. So if the underlying
- // reader is over limit, the result will be bigger than max.
- d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return d, len(d), err
+ return out, out.Len(), nil
}
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
- buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+ data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
if err != nil {
return err
}
- defer cancel()
- if err := c.Unmarshal(buf, m); err != nil {
+ // If the codec wants its own reference to the data, it can get it. Otherwise, always
+ // free the buffers.
+ defer data.Free()
+
+ if err := c.Unmarshal(data, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
+
return nil
}
@@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error {
// encoding.Codec (Name vs. String method name). We only support
// setting content subtype from encoding.Codec to avoid a behavior
// change with the deprecated version.
- if ec, ok := c.codec.(encoding.Codec); ok {
+ if ec, ok := c.codec.(encoding.CodecV2); ok {
c.contentSubtype = strings.ToLower(ec.Name())
}
}
@@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error {
if c.contentSubtype == "" {
// No codec specified in CallOptions; use proto by default.
- c.codec = encoding.GetCodec(proto.Name)
+ c.codec = getCodec(proto.Name)
return nil
}
// c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = encoding.GetCodec(c.contentSubtype)
+ c.codec = getCodec(c.contentSubtype)
if c.codec == nil {
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
}
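
Note (not part of the patch): the WaitForReady doc rewrite above only changes wording; the option is still set per call. A minimal sketch using ClientConn.Invoke directly; the method name and message values are placeholders.

    func fetch(ctx context.Context, cc *grpc.ClientConn, req, resp any) error {
        // With WaitForReady(true), the RPC blocks while the channel is in
        // TRANSIENT_FAILURE until a connection becomes available or ctx's
        // deadline expires, instead of failing immediately.
        return cc.Invoke(ctx, "/example.Service/Method", req, resp, grpc.WaitForReady(true))
    }
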
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 89f8e4792b..d1e1415a40 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -80,7 +81,7 @@ func init() {
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
- internal.RecvBufferPool = recvBufferPool
+ internal.BufferPool = bufferPool
}
var statusOK = status.New(codes.OK, "")
@@ -170,7 +171,7 @@ type serverOptions struct {
maxHeaderListSize *uint32
headerTableSize *uint32
numServerWorkers uint32
- recvBufferPool SharedBufferPool
+ bufferPool mem.BufferPool
waitForHandlers bool
}
@@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{
connectionTimeout: 120 * time.Second,
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
- recvBufferPool: nopBufferPool{},
+ bufferPool: mem.DefaultBufferPool(),
}
var globalServerOptions []ServerOption
@@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV0Bridge(codec)
})
}
@@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption {
// later release.
func ForceServerCodec(codec encoding.Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV1Bridge(codec)
+ })
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = codecV2
})
}
@@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption {
})
}
-// RecvBufferPool returns a ServerOption that configures the server
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: StatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
- return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.recvBufferPool = bufferPool
+ o.bufferPool = bufferPool
})
}
@@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
-// serverWorkers blocks on a *transport.Stream channel forever and waits for
+// serverWorker blocks on a *transport.Stream channel forever and waits for
// data to be fed by serveStreams. This allows multiple requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
@@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ChannelzParent: s.channelz,
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
+ BufferPool: s.opts.bufferPool,
}
st, err := transport.NewServerTransport(c, config)
if err != nil {
@@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+ st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport,
channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
return err
}
- compData, err := compress(data, cp, comp)
+
+ compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
if err != nil {
+ data.Free()
channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
return err
}
- hdr, payload := msgHeader(data, compData)
+
+ hdr, payload := msgHeader(data, compData, pf)
+
+ defer func() {
+ compData.Free()
+ data.Free()
+ // payload does not need to be freed here, it is either data or compData, both of
+ // which are already freed.
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+ if payloadLen > s.opts.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = t.Write(stream, hdr, payload, opts)
if err == nil {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
+ if len(s.opts.statsHandlers) != 0 {
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+ }
}
}
return err
@@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
var payInfo *payloadInfo
if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+ d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}
+ defer d.Free()
if channelz.IsOn() {
t.IncrMsgRecv()
}
df := func(v any) error {
- defer cancel()
-
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
+
for _, sh := range shs {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
- Length: len(d),
+ Length: d.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Data: d,
})
}
if len(binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: d,
+ Message: d.Materialize(),
}
for _, binlog := range binlogs {
binlog.Log(ctx, cm)
@@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
ctx: ctx,
t: t,
s: stream,
- p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
+ p: &parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
@@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
return s.opts.codec
}
if contentSubtype == "" {
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
- codec := encoding.GetCodec(contentSubtype)
+ codec := getCodec(contentSubtype)
if codec == nil {
logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
return codec
}
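
Note (not part of the patch): ForceServerCodecV2 (server-wide) and ForceCodecV2 (per call, added in rpc_util.go above) install an encoding.CodecV2, which this patch wires directly into baseCodec, i.e. it marshals to and unmarshals from mem.BufferSlice. The sketch below assumes the CodecV2 method set is Marshal(any) (mem.BufferSlice, error), Unmarshal(mem.BufferSlice, any) error, and Name() string, which is not shown in this diff.

    package codecx // hypothetical helper package

    import (
        "sync/atomic"

        "google.golang.org/grpc/encoding"
        "google.golang.org/grpc/mem"
    )

    // countingCodec delegates to another CodecV2 and tracks marshalled bytes.
    type countingCodec struct {
        inner encoding.CodecV2
        sent  atomic.Int64
    }

    func (c *countingCodec) Marshal(v any) (mem.BufferSlice, error) {
        out, err := c.inner.Marshal(v)
        if err == nil {
            c.sent.Add(int64(out.Len()))
        }
        return out, err
    }

    func (c *countingCodec) Unmarshal(data mem.BufferSlice, v any) error {
        return c.inner.Unmarshal(data, v)
    }

    func (c *countingCodec) Name() string { return c.inner.Name() }

    // Usage (sketch):
    //   s := grpc.NewServer(grpc.ForceServerCodecV2(&countingCodec{inner: base}))
    //   err := cc.Invoke(ctx, method, req, resp, grpc.ForceCodecV2(&countingCodec{inner: base}))
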
diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go
deleted file mode 100644
index 48a64cfe8e..0000000000
--- a/vendor/google.golang.org/grpc/shared_buffer_pool.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import "sync"
-
-// SharedBufferPool is a pool of buffers that can be shared, resulting in
-// decreased memory allocation. Currently, in gRPC-go, it is only utilized
-// for parsing incoming messages.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type SharedBufferPool interface {
- // Get returns a buffer with specified length from the pool.
- //
- // The returned byte slice may be not zero initialized.
- Get(length int) []byte
-
- // Put returns a buffer to the pool.
- Put(*[]byte)
-}
-
-// NewSharedBufferPool creates a simple SharedBufferPool with buckets
-// of different sizes to optimize memory usage. This prevents the pool from
-// wasting large amounts of memory, even when handling messages of varying sizes.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NewSharedBufferPool() SharedBufferPool {
- return &simpleSharedBufferPool{
- pools: [poolArraySize]simpleSharedBufferChildPool{
- newBytesPool(level0PoolMaxSize),
- newBytesPool(level1PoolMaxSize),
- newBytesPool(level2PoolMaxSize),
- newBytesPool(level3PoolMaxSize),
- newBytesPool(level4PoolMaxSize),
- newBytesPool(0),
- },
- }
-}
-
-// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
-type simpleSharedBufferPool struct {
- pools [poolArraySize]simpleSharedBufferChildPool
-}
-
-func (p *simpleSharedBufferPool) Get(size int) []byte {
- return p.pools[p.poolIdx(size)].Get(size)
-}
-
-func (p *simpleSharedBufferPool) Put(bs *[]byte) {
- p.pools[p.poolIdx(cap(*bs))].Put(bs)
-}
-
-func (p *simpleSharedBufferPool) poolIdx(size int) int {
- switch {
- case size <= level0PoolMaxSize:
- return level0PoolIdx
- case size <= level1PoolMaxSize:
- return level1PoolIdx
- case size <= level2PoolMaxSize:
- return level2PoolIdx
- case size <= level3PoolMaxSize:
- return level3PoolIdx
- case size <= level4PoolMaxSize:
- return level4PoolIdx
- default:
- return levelMaxPoolIdx
- }
-}
-
-const (
- level0PoolMaxSize = 16 // 16 B
- level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
- level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
- level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
- level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
-)
-
-const (
- level0PoolIdx = iota
- level1PoolIdx
- level2PoolIdx
- level3PoolIdx
- level4PoolIdx
- levelMaxPoolIdx
- poolArraySize
-)
-
-type simpleSharedBufferChildPool interface {
- Get(size int) []byte
- Put(any)
-}
-
-type bufferPool struct {
- sync.Pool
-
- defaultSize int
-}
-
-func (p *bufferPool) Get(size int) []byte {
- bs := p.Pool.Get().(*[]byte)
-
- if cap(*bs) < size {
- p.Pool.Put(bs)
-
- return make([]byte, size)
- }
-
- return (*bs)[:size]
-}
-
-func newBytesPool(size int) simpleSharedBufferChildPool {
- return &bufferPool{
- Pool: sync.Pool{
- New: func() any {
- bs := make([]byte, size)
- return &bs
- },
- },
- defaultSize: size,
- }
-}
-
-// nopBufferPool is a buffer pool just makes new buffer without pooling.
-type nopBufferPool struct {
-}
-
-func (nopBufferPool) Get(length int) []byte {
- return make([]byte, length)
-}
-
-func (nopBufferPool) Put(*[]byte) {
-}
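
Note (not part of the patch): this deletion removes the public SharedBufferPool API (grpc.NewSharedBufferPool, grpc.RecvBufferPool). With the serverOptions change above, message parsing now draws from a mem.BufferPool and defaults to mem.DefaultBufferPool(), so callers of the removed option can simply drop it.

    // Before this bump (no longer compiles):
    //   s := grpc.NewServer(grpc.RecvBufferPool(grpc.NewSharedBufferPool()))
    //
    // After this bump, receive buffers are pooled by default:
    s := grpc.NewServer() // parsing uses mem.DefaultBufferPool()
    defer s.Stop()
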
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index fdb0bd6518..71195c4943 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -77,9 +77,6 @@ type InPayload struct {
// the call to HandleRPC which provides the InPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
@@ -150,9 +147,6 @@ type OutPayload struct {
// the call to HandleRPC which provides the OutPayload returns and must be
// copied if needed later.
Payload any
- // Data is the serialized message payload.
- // Deprecated: Data will be removed in the next release.
- Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
Length int
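
Note (not part of the patch): with the deprecated Data fields gone, stats handlers must rely on Payload and the length fields. A minimal handler sketch using only fields that remain after this change; the package name is hypothetical and the stats.Handler method set is assumed from the existing stats API.

    package statsx // hypothetical helper package

    import (
        "context"
        "log"

        "google.golang.org/grpc/stats"
    )

    // sizeLogger logs message sizes; it never needed the removed Data field.
    type sizeLogger struct{}

    func (sizeLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
    func (sizeLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
    func (sizeLogger) HandleConn(context.Context, stats.ConnStats)                       {}

    func (sizeLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
        switch p := s.(type) {
        case *stats.InPayload:
            log.Printf("recv: %d uncompressed, %d on the wire", p.Length, p.WireLength)
        case *stats.OutPayload:
            log.Printf("sent: %d uncompressed, %d on the wire", p.Length, p.WireLength)
        }
    }

    // Register with grpc.StatsHandler(sizeLogger{}) on the server or
    // grpc.WithStatsHandler(sizeLogger{}) on the client.
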
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 8051ef5b51..bb2b2a216c 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -41,6 +41,7 @@ import (
"google.golang.org/grpc/internal/serviceconfig"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs.attempt = a
return nil
}
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+ if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
return nil, err
}
@@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error {
}
a.s = s
a.ctx = s.Context()
- a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
+ a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -566,10 +567,15 @@ type clientStream struct {
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- onCommit func()
- buffer []func(a *csAttempt) error // operations to replay on retry
- bufferSize int // current size of buffer
+ committed bool // active attempt committed for retry?
+ onCommit func()
+ replayBuffer []replayOp // operations to replay on retry
+ replayBufferSize int // current size of replayBuffer
+}
+
+type replayOp struct {
+ op func(a *csAttempt) error
+ cleanup func()
}
// csAttempt implements a single transport stream attempt within a
@@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() {
cs.onCommit()
}
cs.committed = true
- cs.buffer = nil
+ for _, op := range cs.replayBuffer {
+ if op.cleanup != nil {
+ op.cleanup()
+ }
+ }
+ cs.replayBuffer = nil
}
func (cs *clientStream) commitAttempt() {
@@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
// the stream is canceled.
return err
}
- // Note that the first op in the replay buffer always sets cs.attempt
+ // Note that the first op in replayBuffer always sets cs.attempt
// if it is able to pick a transport and create a stream.
if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
@@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
// already be status errors.
return toRPCErr(op(cs.attempt))
}
- if len(cs.buffer) == 0 {
+ if len(cs.replayBuffer) == 0 {
// For the first op, which controls creation of the stream and
// assigns cs.attempt, we need to create a new attempt inline
// before executing the first op. On subsequent ops, the attempt
@@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD {
}
func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
- for _, f := range cs.buffer {
- if err := f(attempt); err != nil {
+ for _, f := range cs.replayBuffer {
+ if err := f.op(attempt); err != nil {
return err
}
}
return nil
}
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
// Note: we still will buffer if retry is disabled (for transparent retries).
if cs.committed {
return
}
- cs.bufferSize += sz
- if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.replayBufferSize += sz
+ if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
cs.commitAttemptLocked()
+ cleanup()
return
}
- cs.buffer = append(cs.buffer, op)
+ cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
}
func (cs *clientStream) SendMsg(m any) (err error) {
@@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ if payloadLen > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
}
+
+ // always take an extra ref in case data == payload (i.e. when the data isn't
+ // compressed). The original ref will always be freed by the deferred free above.
+ payload.Ref()
op := func(a *csAttempt) error {
- return a.sendMsg(m, hdr, payload, data)
+ return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
+ }
+
+ // onSuccess is invoked when the op is captured for a subsequent retry. If the
+ // stream was established by a previous message and therefore retries are
+ // disabled, onSuccess will not be invoked, and the payload reference can be
+ // freed immediately.
+ onSuccessCalled := false
+ err = cs.withRetry(op, func() {
+ cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+ onSuccessCalled = true
+ })
+ if !onSuccessCalled {
+ payload.Free()
}
- err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if len(cs.binlogs) != 0 && err == nil {
cm := &binarylog.ClientMessage{
OnClientSide: true,
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, cm)
@@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error {
var recvInfo *payloadInfo
if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
+ defer recvInfo.free()
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
@@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error {
if len(cs.binlogs) != 0 && err == nil {
sm := &binarylog.ServerMessage{
OnClientSide: true,
- Message: recvInfo.uncompressedBytes,
+ Message: recvInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, sm)
@@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error {
// RecvMsg. This also matches historical behavior.
return nil
}
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
if len(cs.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{
OnClientSide: true,
@@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) {
cs.cancel()
}
-func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
cs := a.cs
if a.trInfo != nil {
a.mu.Lock()
@@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
}
return io.EOF
}
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+ if len(a.statsHandlers) != 0 {
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+ }
}
if channelz.IsOn() {
a.t.IncrMsgSent()
@@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
if !a.decompSet {
@@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompSet = true
}
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
- if err != nil {
+ if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
if err == io.EOF {
if statusErr := a.s.Status().Err(); statusErr != nil {
return statusErr
@@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, &stats.InPayload{
- Client: true,
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
+ Client: true,
+ RecvTime: time.Now(),
+ Payload: m,
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
- Length: len(payInfo.uncompressedBytes),
+ Length: payInfo.uncompressedBytes.Len(),
})
}
if channelz.IsOn() {
@@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
- }
- if err == io.EOF {
+ if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
}
func (a *csAttempt) finish(err error) {
@@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock()
}
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
// - no retry
@@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.s = s
- as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
+ as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payld) > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ if payload.Len() > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
}
- if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
if !as.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompSet = true
}
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err != nil {
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
if err == io.EOF {
if statusErr := as.s.Status().Err(); statusErr != nil {
return statusErr
@@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
- }
- if err == io.EOF {
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- return toRPCErr(err)
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want "))
}
func (as *addrConnStream) finish(err error) {
@@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+ if payloadLen > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
+
if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
@@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
}
sm := &binarylog.ServerMessage{
- Message: data,
+ Message: data.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, sm)
@@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
}
return nil
@@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
var payInfo *payloadInfo
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- Length: len(payInfo.uncompressedBytes),
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
@@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
- Message: payInfo.uncompressedBytes,
+ Message: payInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, cm)
@@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context())
}
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
-func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned boolean indicates whether
+// compression was made and therefore whether the payload needs to be freed in
+// addition to the returned data. Freeing the payload if the returned boolean is
+// false can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
- return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+ return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
}
// The input interface is not a prepared msg.
// Marshal and Compress the data at this point
data, err = encode(codec, m)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, nil, 0, err
}
- compData, err := compress(data, cp, comp)
+ compData, pf, err := compress(data, cp, comp, pool)
if err != nil {
- return nil, nil, nil, err
+ data.Free()
+ return nil, nil, nil, 0, err
}
- hdr, payload = msgHeader(data, compData)
- return hdr, payload, data, nil
+ hdr, payload = msgHeader(data, compData, pf)
+ return hdr, data, payload, pf, nil
}
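
The reworked prepareMsg hands back the encoded data and the (possibly compressed) payload as mem.BufferSlice values plus a payloadFormat, and ownership of those buffers moves to the caller. Below is a minimal sketch of the calling pattern the SendMsg paths above follow; it assumes the grpc package scope (prepareMsg, baseCodec and payloadFormat are unexported there), and the maxSend and write parameters are hypothetical stand-ins for the stream's call options and transport write.

```go
// Sketch only: the ownership pattern around the new prepareMsg signature, as
// used by the SendMsg paths in this diff. Assumes it lives inside package grpc.
func sendPrepared(m any, codec baseCodec, cp Compressor, comp encoding.Compressor,
	pool mem.BufferPool, maxSend int, write func(hdr []byte, payload mem.BufferSlice) error) error {
	hdr, data, payload, pf, err := prepareMsg(m, codec, cp, comp, pool)
	if err != nil {
		return err
	}
	defer func() {
		data.Free()
		// payload aliases data unless compression produced a separate buffer set.
		if pf.isCompressed() {
			payload.Free()
		}
	}()

	if payload.Len() > maxSend {
		return status.Errorf(codes.ResourceExhausted,
			"trying to send message larger than max (%d vs. %d)", payload.Len(), maxSend)
	}
	return write(hdr, payload)
}
```

The key change is that freeing the payload is only safe when it is actually a distinct buffer set, which is exactly why the diff threads the payloadFormat back to the callers.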
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
index 8b813529c0..0037fee0bd 100644
--- a/vendor/google.golang.org/grpc/stream_interfaces.go
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -22,15 +22,35 @@ package grpc
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
type ServerStreamingClient[Res any] interface {
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
// ServerStreamingServer represents the server side of a server-streaming (one
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type ServerStreamingServer[Res any] interface {
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface {
// message stream and the type of the unary response message. It is used in
// generated code.
type ClientStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using CloseAndRecv().
Send(*Req) error
+
+ // CloseAndRecv closes the request stream and waits for the server's
+ // response. This method must be called once and only once after sending
+ // all request messages. Any error returned is implemented by the status
+ // package.
CloseAndRecv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
ClientStream
}
@@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface {
// requests, one response) RPC. It is generic over both the type of the request
// message stream and the type of the unary response message. It is used in
// generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
type ClientStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseAndRecv on its
+ // ClientStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // SendAndClose sends a single response message to the client and closes
+ // the stream. This method must be called once and only once after all
+ // request messages have been processed. Recv should not be called after
+ // calling SendAndClose.
SendAndClose(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
@@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface {
// request message stream and the type of the response message stream. It is
// used in generated code.
type BidiStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using Recv().
Send(*Req) error
+
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, Trailer, and
+ // CloseSend functionality. No other methods in the ClientStream should be
+ // called directly.
ClientStream
}
@@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface {
// (many requests, many responses) RPC. It is generic over both the type of the
// request message stream and the type of the response message stream. It is
// used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
type BidiStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseSend on its
+ // BidiStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
Recv() (*Req, error)
+
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
ServerStream
}
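
The new interface comments pin down the calling contract for the generic streaming interfaces. As a hedged illustration, a client-streaming handler that follows that contract might look like the sketch below; SumRequest and SumResponse are hypothetical message types, not part of gRPC, and generated code would normally supply the concrete stream value.

```go
package example

import (
	"io"

	"google.golang.org/grpc"
)

// SumRequest and SumResponse are hypothetical messages standing in for
// protobuf-generated types.
type SumRequest struct{ Value int64 }
type SumResponse struct{ Total int64 }

// sumHandler honors the ClientStreamingServer contract documented above:
// Recv until io.EOF, then reply exactly once with SendAndClose.
func sumHandler(stream grpc.ClientStreamingServer[SumRequest, SumResponse]) error {
	var total int64
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			// io.EOF means the client called CloseAndRecv; answer once and stop.
			return stream.SendAndClose(&SumResponse{Total: total})
		}
		if err != nil {
			// Stream is no longer usable; returning ends the RPC with this error.
			return err
		}
		total += req.Value
	}
}
```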
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index bafaef99be..187fbf1195 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.65.0"
+const Version = "1.67.0"
diff --git a/vendor/knative.dev/eventing/hack/update-codegen.sh b/vendor/knative.dev/eventing/hack/update-codegen.sh
index fdeb803f9c..a0fe67dcbf 100644
--- a/vendor/knative.dev/eventing/hack/update-codegen.sh
+++ b/vendor/knative.dev/eventing/hack/update-codegen.sh
@@ -19,6 +19,7 @@ set -o nounset
set -o pipefail
source $(dirname $0)/../vendor/knative.dev/hack/codegen-library.sh
+source "${CODEGEN_PKG}/kube_codegen.sh"
# If we run with -mod=vendor here, then generate-groups.sh looks for vendor files in the wrong place.
export GOFLAGS=-mod=
@@ -32,27 +33,16 @@ ${REPO_ROOT_DIR}/hack/update-checksums.sh
group "Kubernetes Codegen"
-# generate the code with:
-# --output-base because this script should also be able to run inside the vendor dir of
-# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
-# instead of the $GOPATH directly. For normal projects this can be dropped.
-${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
- knative.dev/eventing/pkg/client knative.dev/eventing/pkg/apis \
- "sinks:v1alpha1 eventing:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 messaging:v1 flows:v1 sources:v1beta2 sources:v1" \
- --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt
-
-# Deep copy config
-${GOPATH}/bin/deepcopy-gen \
- -O zz_generated.deepcopy \
- --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \
- -i knative.dev/eventing/pkg/apis/config \
- -i knative.dev/eventing/pkg/apis/messaging/config \
+kube::codegen::gen_helpers \
+ --boilerplate "${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt" \
+ "${REPO_ROOT_DIR}/pkg/apis"
-# Only deepcopy the Duck types, as they are not real resources.
-${CODEGEN_PKG}/generate-groups.sh "deepcopy" \
- knative.dev/eventing/pkg/client knative.dev/eventing/pkg/apis \
- "duck:v1beta1 duck:v1alpha1 duck:v1" \
- --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt
+kube::codegen::gen_client \
+ --boilerplate "${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt" \
+ --output-dir "${REPO_ROOT_DIR}/pkg/client" \
+ --output-pkg "knative.dev/eventing/pkg/client" \
+ --with-watch \
+ "${REPO_ROOT_DIR}/pkg/apis"
group "Knative Codegen"
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/zz_generated.defaults.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/zz_generated.defaults.go
new file mode 100644
index 0000000000..1f5150c891
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.defaults.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.defaults.go
new file mode 100644
index 0000000000..99317ad98f
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.defaults.go
@@ -0,0 +1,138 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ scheme.AddTypeDefaultingFunc(&JobSink{}, func(obj interface{}) { SetObjectDefaults_JobSink(obj.(*JobSink)) })
+ return nil
+}
+
+func SetObjectDefaults_JobSink(in *JobSink) {
+ if in.Spec.Job != nil {
+ for i := range in.Spec.Job.Spec.Template.Spec.InitContainers {
+ a := &in.Spec.Job.Spec.Template.Spec.InitContainers[i]
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ if b.Protocol == "" {
+ b.Protocol = "TCP"
+ }
+ }
+ if a.LivenessProbe != nil {
+ if a.LivenessProbe.ProbeHandler.GRPC != nil {
+ if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ if a.ReadinessProbe != nil {
+ if a.ReadinessProbe.ProbeHandler.GRPC != nil {
+ if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ if a.StartupProbe != nil {
+ if a.StartupProbe.ProbeHandler.GRPC != nil {
+ if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ }
+ for i := range in.Spec.Job.Spec.Template.Spec.Containers {
+ a := &in.Spec.Job.Spec.Template.Spec.Containers[i]
+ for j := range a.Ports {
+ b := &a.Ports[j]
+ if b.Protocol == "" {
+ b.Protocol = "TCP"
+ }
+ }
+ if a.LivenessProbe != nil {
+ if a.LivenessProbe.ProbeHandler.GRPC != nil {
+ if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ if a.ReadinessProbe != nil {
+ if a.ReadinessProbe.ProbeHandler.GRPC != nil {
+ if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ if a.StartupProbe != nil {
+ if a.StartupProbe.ProbeHandler.GRPC != nil {
+ if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ }
+ for i := range in.Spec.Job.Spec.Template.Spec.EphemeralContainers {
+ a := &in.Spec.Job.Spec.Template.Spec.EphemeralContainers[i]
+ for j := range a.EphemeralContainerCommon.Ports {
+ b := &a.EphemeralContainerCommon.Ports[j]
+ if b.Protocol == "" {
+ b.Protocol = "TCP"
+ }
+ }
+ if a.EphemeralContainerCommon.LivenessProbe != nil {
+ if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
+ if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ if a.EphemeralContainerCommon.ReadinessProbe != nil {
+ if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
+ if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ if a.EphemeralContainerCommon.StartupProbe != nil {
+ if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
+ if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
+ var ptrVar1 string = ""
+ a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
+ }
+ }
+ }
+ }
+ }
+}
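
The generated defaulter walks every container of the JobSink's embedded Job template and fills in the usual Kubernetes defaults (TCP port protocol, empty gRPC probe service). A minimal sketch of how such scheme-registered defaults are typically applied is below; the Spec.Job field mirrors the generated code above, while the JobSinkSpec type name and the sample object are assumptions for illustration.

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// RegisterDefaults wires SetObjectDefaults_JobSink into the scheme.
	if err := sinksv1alpha1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}

	js := &sinksv1alpha1.JobSink{
		Spec: sinksv1alpha1.JobSinkSpec{ // type name assumed
			Job: &batchv1.Job{
				Spec: batchv1.JobSpec{
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							Containers: []corev1.Container{{
								Name:  "worker",
								Ports: []corev1.ContainerPort{{ContainerPort: 8080}},
							}},
						},
					},
				},
			},
		},
	}

	// Default invokes the registered defaulting func for *JobSink.
	scheme.Default(js)
	fmt.Println(js.Spec.Job.Spec.Template.Spec.Containers[0].Ports[0].Protocol) // "TCP"
}
```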
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/zz_generated.defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/zz_generated.defaults.go
new file mode 100644
index 0000000000..1f5150c891
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/zz_generated.defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/zz_generated.defaults.go
new file mode 100644
index 0000000000..00b88f9aff
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/auth/token_verifier.go b/vendor/knative.dev/eventing/pkg/auth/verifier.go
similarity index 84%
rename from vendor/knative.dev/eventing/pkg/auth/token_verifier.go
rename to vendor/knative.dev/eventing/pkg/auth/verifier.go
index 0d2c3888b9..3cf9c436bc 100644
--- a/vendor/knative.dev/eventing/pkg/auth/token_verifier.go
+++ b/vendor/knative.dev/eventing/pkg/auth/verifier.go
@@ -27,7 +27,6 @@ import (
"time"
duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
- eventpolicyinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy"
"knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
"github.com/cloudevents/sdk-go/v2/binding"
@@ -37,6 +36,7 @@ import (
"k8s.io/client-go/rest"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
"knative.dev/eventing/pkg/apis/feature"
+ listerseventingv1alpha1 "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
"knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
@@ -45,7 +45,7 @@ const (
kubernetesOIDCDiscoveryBaseURL = "https://kubernetes.default.svc"
)
-type OIDCTokenVerifier struct {
+type Verifier struct {
logger *zap.SugaredLogger
restConfig *rest.Config
provider *oidc.Provider
@@ -61,11 +61,11 @@ type IDToken struct {
AccessTokenHash string
}
-func NewOIDCTokenVerifier(ctx context.Context) *OIDCTokenVerifier {
- tokenHandler := &OIDCTokenVerifier{
+func NewVerifier(ctx context.Context, eventPolicyLister listerseventingv1alpha1.EventPolicyLister) *Verifier {
+ tokenHandler := &Verifier{
logger: logging.FromContext(ctx).With("component", "oidc-token-handler"),
restConfig: injection.GetConfig(ctx),
- eventPolicyLister: eventpolicyinformer.Get(ctx).Lister(),
+ eventPolicyLister: eventPolicyLister,
}
if err := tokenHandler.initOIDCProvider(ctx); err != nil {
@@ -75,18 +75,9 @@ func NewOIDCTokenVerifier(ctx context.Context) *OIDCTokenVerifier {
return tokenHandler
}
-// VerifyJWTFromRequest verifies if the incoming request contains a correct JWT token
-//
-// Deprecated: use OIDCTokenVerifier.Verify() instead to bundle AuthN and AuthZ verification
-func (v *OIDCTokenVerifier) VerifyJWTFromRequest(ctx context.Context, r *http.Request, audience *string, response http.ResponseWriter) error {
- _, err := v.verifyAuthN(ctx, audience, r, response)
-
- return err
-}
-
// VerifyRequest verifies AuthN and AuthZ in the request. On verification errors, it sets the
// responses HTTP status and returns an error
-func (v *OIDCTokenVerifier) VerifyRequest(ctx context.Context, features feature.Flags, requiredOIDCAudience *string, resourceNamespace string, policyRefs []duckv1.AppliedEventPolicyRef, req *http.Request, resp http.ResponseWriter) error {
+func (v *Verifier) VerifyRequest(ctx context.Context, features feature.Flags, requiredOIDCAudience *string, resourceNamespace string, policyRefs []duckv1.AppliedEventPolicyRef, req *http.Request, resp http.ResponseWriter) error {
if !features.IsOIDCAuthentication() {
return nil
}
@@ -109,7 +100,7 @@ func (v *OIDCTokenVerifier) VerifyRequest(ctx context.Context, features feature.
// On verification errors, it sets the responses HTTP status and returns an error.
// This method is similar to VerifyRequest() except that VerifyRequestFromSubject()
// verifies in the AuthZ part that the request comes from a given subject.
-func (v *OIDCTokenVerifier) VerifyRequestFromSubject(ctx context.Context, features feature.Flags, requiredOIDCAudience *string, allowedSubject string, req *http.Request, resp http.ResponseWriter) error {
+func (v *Verifier) VerifyRequestFromSubject(ctx context.Context, features feature.Flags, requiredOIDCAudience *string, allowedSubject string, req *http.Request, resp http.ResponseWriter) error {
if !features.IsOIDCAuthentication() {
return nil
}
@@ -128,7 +119,7 @@ func (v *OIDCTokenVerifier) VerifyRequestFromSubject(ctx context.Context, featur
}
// verifyAuthN verifies if the incoming request contains a correct JWT token
-func (v *OIDCTokenVerifier) verifyAuthN(ctx context.Context, audience *string, req *http.Request, resp http.ResponseWriter) (*IDToken, error) {
+func (v *Verifier) verifyAuthN(ctx context.Context, audience *string, req *http.Request, resp http.ResponseWriter) (*IDToken, error) {
token := GetJWTFromHeader(req.Header)
if token == "" {
resp.WriteHeader(http.StatusUnauthorized)
@@ -150,7 +141,7 @@ func (v *OIDCTokenVerifier) verifyAuthN(ctx context.Context, audience *string, r
}
// verifyAuthZ verifies if the given idToken is allowed by the resources eventPolicyStatus
-func (v *OIDCTokenVerifier) verifyAuthZ(ctx context.Context, features feature.Flags, idToken *IDToken, resourceNamespace string, policyRefs []duckv1.AppliedEventPolicyRef, req *http.Request, resp http.ResponseWriter) error {
+func (v *Verifier) verifyAuthZ(ctx context.Context, features feature.Flags, idToken *IDToken, resourceNamespace string, policyRefs []duckv1.AppliedEventPolicyRef, req *http.Request, resp http.ResponseWriter) error {
if len(policyRefs) > 0 {
req, err := copyRequest(req)
if err != nil {
@@ -204,7 +195,7 @@ func (v *OIDCTokenVerifier) verifyAuthZ(ctx context.Context, features feature.Fl
}
// verifyJWT verifies the given JWT for the expected audience and returns the parsed ID token.
-func (v *OIDCTokenVerifier) verifyJWT(ctx context.Context, jwt, audience string) (*IDToken, error) {
+func (v *Verifier) verifyJWT(ctx context.Context, jwt, audience string) (*IDToken, error) {
if v.provider == nil {
return nil, fmt.Errorf("provider is nil. Is the OIDC provider config correct?")
}
@@ -228,7 +219,7 @@ func (v *OIDCTokenVerifier) verifyJWT(ctx context.Context, jwt, audience string)
}, nil
}
-func (v *OIDCTokenVerifier) initOIDCProvider(ctx context.Context) error {
+func (v *Verifier) initOIDCProvider(ctx context.Context) error {
discovery, err := v.getKubernetesOIDCDiscovery()
if err != nil {
return fmt.Errorf("could not load Kubernetes OIDC discovery information: %w", err)
@@ -256,7 +247,7 @@ func (v *OIDCTokenVerifier) initOIDCProvider(ctx context.Context) error {
return nil
}
-func (v *OIDCTokenVerifier) getHTTPClientForKubeAPIServer() (*http.Client, error) {
+func (v *Verifier) getHTTPClientForKubeAPIServer() (*http.Client, error) {
client, err := rest.HTTPClientFor(v.restConfig)
if err != nil {
return nil, fmt.Errorf("could not create HTTP client from rest config: %w", err)
@@ -265,7 +256,7 @@ func (v *OIDCTokenVerifier) getHTTPClientForKubeAPIServer() (*http.Client, error
return client, nil
}
-func (v *OIDCTokenVerifier) getKubernetesOIDCDiscovery() (*openIDMetadata, error) {
+func (v *Verifier) getKubernetesOIDCDiscovery() (*openIDMetadata, error) {
client, err := v.getHTTPClientForKubeAPIServer()
if err != nil {
return nil, fmt.Errorf("could not get HTTP client for API server: %w", err)
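
After the rename, the verifier no longer resolves the EventPolicy lister through informer injection itself; the caller supplies it. A sketch of what a call site might look like, assuming a component that already sets up the eventpolicy injection informer (the import path matches the one removed from the verifier above):

```go
package example

import (
	"context"

	"knative.dev/eventing/pkg/auth"
	eventpolicyinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy"
)

// newVerifierFromContext mirrors what callers now do themselves: fetch the
// EventPolicy lister (here via the injection informer the verifier used to
// import) and hand it to the renamed constructor.
func newVerifierFromContext(ctx context.Context) *auth.Verifier {
	// Previously: auth.NewOIDCTokenVerifier(ctx), which fetched the lister itself.
	return auth.NewVerifier(ctx, eventpolicyinformer.Get(ctx).Lister())
}
```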
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go
index 0ca8d7350b..72890b65cc 100644
--- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go
@@ -39,31 +39,36 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
+ EventingV1() eventingv1.EventingV1Interface
EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface
EventingV1beta1() eventingv1beta1.EventingV1beta1Interface
EventingV1beta2() eventingv1beta2.EventingV1beta2Interface
EventingV1beta3() eventingv1beta3.EventingV1beta3Interface
- EventingV1() eventingv1.EventingV1Interface
FlowsV1() flowsv1.FlowsV1Interface
MessagingV1() messagingv1.MessagingV1Interface
SinksV1alpha1() sinksv1alpha1.SinksV1alpha1Interface
- SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface
SourcesV1() sourcesv1.SourcesV1Interface
+ SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface
}
// Clientset contains the clients for groups.
type Clientset struct {
*discovery.DiscoveryClient
+ eventingV1 *eventingv1.EventingV1Client
eventingV1alpha1 *eventingv1alpha1.EventingV1alpha1Client
eventingV1beta1 *eventingv1beta1.EventingV1beta1Client
eventingV1beta2 *eventingv1beta2.EventingV1beta2Client
eventingV1beta3 *eventingv1beta3.EventingV1beta3Client
- eventingV1 *eventingv1.EventingV1Client
flowsV1 *flowsv1.FlowsV1Client
messagingV1 *messagingv1.MessagingV1Client
sinksV1alpha1 *sinksv1alpha1.SinksV1alpha1Client
- sourcesV1beta2 *sourcesv1beta2.SourcesV1beta2Client
sourcesV1 *sourcesv1.SourcesV1Client
+ sourcesV1beta2 *sourcesv1beta2.SourcesV1beta2Client
+}
+
+// EventingV1 retrieves the EventingV1Client
+func (c *Clientset) EventingV1() eventingv1.EventingV1Interface {
+ return c.eventingV1
}
// EventingV1alpha1 retrieves the EventingV1alpha1Client
@@ -86,11 +91,6 @@ func (c *Clientset) EventingV1beta3() eventingv1beta3.EventingV1beta3Interface {
return c.eventingV1beta3
}
-// EventingV1 retrieves the EventingV1Client
-func (c *Clientset) EventingV1() eventingv1.EventingV1Interface {
- return c.eventingV1
-}
-
// FlowsV1 retrieves the FlowsV1Client
func (c *Clientset) FlowsV1() flowsv1.FlowsV1Interface {
return c.flowsV1
@@ -106,16 +106,16 @@ func (c *Clientset) SinksV1alpha1() sinksv1alpha1.SinksV1alpha1Interface {
return c.sinksV1alpha1
}
-// SourcesV1beta2 retrieves the SourcesV1beta2Client
-func (c *Clientset) SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface {
- return c.sourcesV1beta2
-}
-
// SourcesV1 retrieves the SourcesV1Client
func (c *Clientset) SourcesV1() sourcesv1.SourcesV1Interface {
return c.sourcesV1
}
+// SourcesV1beta2 retrieves the SourcesV1beta2Client
+func (c *Clientset) SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface {
+ return c.sourcesV1beta2
+}
+
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
@@ -160,6 +160,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
var cs Clientset
var err error
+ cs.eventingV1, err = eventingv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
cs.eventingV1alpha1, err = eventingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@@ -176,10 +180,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
- cs.eventingV1, err = eventingv1.NewForConfigAndClient(&configShallowCopy, httpClient)
- if err != nil {
- return nil, err
- }
cs.flowsV1, err = flowsv1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@@ -192,11 +192,11 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
- cs.sourcesV1beta2, err = sourcesv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient)
+ cs.sourcesV1, err = sourcesv1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
- cs.sourcesV1, err = sourcesv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ cs.sourcesV1beta2, err = sourcesv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
@@ -221,16 +221,16 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
+ cs.eventingV1 = eventingv1.New(c)
cs.eventingV1alpha1 = eventingv1alpha1.New(c)
cs.eventingV1beta1 = eventingv1beta1.New(c)
cs.eventingV1beta2 = eventingv1beta2.New(c)
cs.eventingV1beta3 = eventingv1beta3.New(c)
- cs.eventingV1 = eventingv1.New(c)
cs.flowsV1 = flowsv1.New(c)
cs.messagingV1 = messagingv1.New(c)
cs.sinksV1alpha1 = sinksv1alpha1.New(c)
- cs.sourcesV1beta2 = sourcesv1beta2.New(c)
cs.sourcesV1 = sourcesv1.New(c)
+ cs.sourcesV1beta2 = sourcesv1beta2.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go
index ce9c364948..8346286413 100644
--- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -97,6 +97,11 @@ var (
_ testing.FakeClient = &Clientset{}
)
+// EventingV1 retrieves the EventingV1Client
+func (c *Clientset) EventingV1() eventingv1.EventingV1Interface {
+ return &fakeeventingv1.FakeEventingV1{Fake: &c.Fake}
+}
+
// EventingV1alpha1 retrieves the EventingV1alpha1Client
func (c *Clientset) EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface {
return &fakeeventingv1alpha1.FakeEventingV1alpha1{Fake: &c.Fake}
@@ -117,11 +122,6 @@ func (c *Clientset) EventingV1beta3() eventingv1beta3.EventingV1beta3Interface {
return &fakeeventingv1beta3.FakeEventingV1beta3{Fake: &c.Fake}
}
-// EventingV1 retrieves the EventingV1Client
-func (c *Clientset) EventingV1() eventingv1.EventingV1Interface {
- return &fakeeventingv1.FakeEventingV1{Fake: &c.Fake}
-}
-
// FlowsV1 retrieves the FlowsV1Client
func (c *Clientset) FlowsV1() flowsv1.FlowsV1Interface {
return &fakeflowsv1.FakeFlowsV1{Fake: &c.Fake}
@@ -137,12 +137,12 @@ func (c *Clientset) SinksV1alpha1() sinksv1alpha1.SinksV1alpha1Interface {
return &fakesinksv1alpha1.FakeSinksV1alpha1{Fake: &c.Fake}
}
-// SourcesV1beta2 retrieves the SourcesV1beta2Client
-func (c *Clientset) SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface {
- return &fakesourcesv1beta2.FakeSourcesV1beta2{Fake: &c.Fake}
-}
-
// SourcesV1 retrieves the SourcesV1Client
func (c *Clientset) SourcesV1() sourcesv1.SourcesV1Interface {
return &fakesourcesv1.FakeSourcesV1{Fake: &c.Fake}
}
+
+// SourcesV1beta2 retrieves the SourcesV1beta2Client
+func (c *Clientset) SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface {
+ return &fakesourcesv1beta2.FakeSourcesV1beta2{Fake: &c.Fake}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go
index 69946bd6e5..6fc03b4ba5 100644
--- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go
@@ -40,16 +40,16 @@ var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
+ eventingv1.AddToScheme,
eventingv1alpha1.AddToScheme,
eventingv1beta1.AddToScheme,
eventingv1beta2.AddToScheme,
eventingv1beta3.AddToScheme,
- eventingv1.AddToScheme,
flowsv1.AddToScheme,
messagingv1.AddToScheme,
sinksv1alpha1.AddToScheme,
- sourcesv1beta2.AddToScheme,
sourcesv1.AddToScheme,
+ sourcesv1beta2.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
index e037c6c174..5d2955e038 100644
--- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
@@ -40,16 +40,16 @@ var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
+ eventingv1.AddToScheme,
eventingv1alpha1.AddToScheme,
eventingv1beta1.AddToScheme,
eventingv1beta2.AddToScheme,
eventingv1beta3.AddToScheme,
- eventingv1.AddToScheme,
flowsv1.AddToScheme,
messagingv1.AddToScheme,
sinksv1alpha1.AddToScheme,
- sourcesv1beta2.AddToScheme,
sourcesv1.AddToScheme,
+ sourcesv1beta2.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/interface.go
index ccfc67a42a..760bafcecd 100644
--- a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/interface.go
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/interface.go
@@ -29,6 +29,8 @@ import (
// Interface provides access to each of this group's versions.
type Interface interface {
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
// V1beta1 provides access to shared informers for resources in V1beta1.
@@ -37,8 +39,6 @@ type Interface interface {
V1beta2() v1beta2.Interface
// V1beta3 provides access to shared informers for resources in V1beta3.
V1beta3() v1beta3.Interface
- // V1 provides access to shared informers for resources in V1.
- V1() v1.Interface
}
type group struct {
@@ -52,6 +52,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
@@ -71,8 +76,3 @@ func (g *group) V1beta2() v1beta2.Interface {
func (g *group) V1beta3() v1beta3.Interface {
return v1beta3.New(g.factory, g.namespace, g.tweakListOptions)
}
-
-// V1 returns a new v1.Interface.
-func (g *group) V1() v1.Interface {
- return v1.New(g.factory, g.namespace, g.tweakListOptions)
-}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go
index eb406b621f..c8b6385943 100644
--- a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go
@@ -26,10 +26,10 @@ import (
// Interface provides access to each of this group's versions.
type Interface interface {
- // V1beta2 provides access to shared informers for resources in V1beta2.
- V1beta2() v1beta2.Interface
// V1 provides access to shared informers for resources in V1.
V1() v1.Interface
+ // V1beta2 provides access to shared informers for resources in V1beta2.
+ V1beta2() v1beta2.Interface
}
type group struct {
@@ -43,12 +43,12 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
-// V1beta2 returns a new v1beta2.Interface.
-func (g *group) V1beta2() v1beta2.Interface {
- return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
-}
-
// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
return v1.New(g.factory, g.namespace, g.tweakListOptions)
}
+
+// V1beta2 returns a new v1beta2.Interface.
+func (g *group) V1beta2() v1beta2.Interface {
+ return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go b/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go
index 88e470d8b4..a9ca7b1d5a 100644
--- a/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go
+++ b/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go
@@ -92,18 +92,18 @@ type Evictor func(pod *corev1.Pod, vpod VPod, from *duckv1alpha1.Placement) erro
// Scheduler is responsible for placing VPods into real Kubernetes pods
type Scheduler interface {
// Schedule computes the new set of placements for vpod.
- Schedule(vpod VPod) ([]duckv1alpha1.Placement, error)
+ Schedule(ctx context.Context, vpod VPod) ([]duckv1alpha1.Placement, error)
}
// SchedulerFunc type is an adapter to allow the use of
// ordinary functions as Schedulers. If f is a function
// with the appropriate signature, SchedulerFunc(f) is a
// Scheduler that calls f.
-type SchedulerFunc func(vpod VPod) ([]duckv1alpha1.Placement, error)
+type SchedulerFunc func(ctx context.Context, vpod VPod) ([]duckv1alpha1.Placement, error)
// Schedule implements the Scheduler interface.
-func (f SchedulerFunc) Schedule(vpod VPod) ([]duckv1alpha1.Placement, error) {
- return f(vpod)
+func (f SchedulerFunc) Schedule(ctx context.Context, vpod VPod) ([]duckv1alpha1.Placement, error) {
+ return f(ctx, vpod)
}
// VPod represents virtual replicas placed into real Kubernetes pods
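
Schedule now threads a context through the Scheduler interface and its SchedulerFunc adapter. A minimal sketch of an adapter honoring the new signature is shown below; the single-pod placement and pod name are hypothetical stand-ins for real scheduling logic.

```go
package example

import (
	"context"

	duckv1alpha1 "knative.dev/eventing/pkg/apis/duck/v1alpha1"
	"knative.dev/eventing/pkg/scheduler"
)

// noopScheduler satisfies the updated Scheduler interface via SchedulerFunc.
var noopScheduler scheduler.Scheduler = scheduler.SchedulerFunc(
	func(ctx context.Context, vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) {
		if err := ctx.Err(); err != nil {
			return nil, err // honor cancellation before doing any work
		}
		return []duckv1alpha1.Placement{{
			PodName:   "dispatcher-0", // hypothetical pod
			VReplicas: vpod.GetVReplicas(),
		}}, nil
	},
)
```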
diff --git a/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go b/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go
index 5ec66b2156..ad3a5aaf76 100644
--- a/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go
+++ b/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go
@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
+
"knative.dev/eventing/pkg/scheduler"
)
@@ -55,10 +56,10 @@ func SatisfyZoneAvailability(feasiblePods []int32, states *State) bool {
var zoneName string
var err error
for _, podID := range feasiblePods {
- wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
- zoneName, _, err = states.GetPodInfo(PodNameFromOrdinal(states.StatefulSetName, podID))
- return err == nil, nil
- })
+ zoneName, _, err = states.GetPodInfo(PodNameFromOrdinal(states.StatefulSetName, podID))
+ if err != nil {
+ continue
+ }
zoneMap[zoneName] = struct{}{}
}
return len(zoneMap) == int(states.NumZones)
diff --git a/vendor/knative.dev/eventing/pkg/scheduler/state/state.go b/vendor/knative.dev/eventing/pkg/scheduler/state/state.go
index aa84ca996f..44069babe9 100644
--- a/vendor/knative.dev/eventing/pkg/scheduler/state/state.go
+++ b/vendor/knative.dev/eventing/pkg/scheduler/state/state.go
@@ -22,7 +22,6 @@ import (
"errors"
"math"
"strconv"
- "time"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
@@ -30,9 +29,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/apimachinery/pkg/util/wait"
corev1 "k8s.io/client-go/listers/core/v1"
-
"knative.dev/pkg/logging"
"knative.dev/eventing/pkg/scheduler"
@@ -42,7 +39,7 @@ type StateAccessor interface {
// State returns the current state (snapshot) about placed vpods
// Take into account reserved vreplicas and update `reserved` to reflect
// the current state.
- State(reserved map[types.NamespacedName]map[string]int32) (*State, error)
+ State(ctx context.Context, reserved map[types.NamespacedName]map[string]int32) (*State, error)
}
// state provides information about the current scheduling of all vpods
@@ -152,8 +149,6 @@ func (s *State) IsSchedulablePod(ordinal int32) bool {
// stateBuilder reconstruct the state from scratch, by listing vpods
type stateBuilder struct {
- ctx context.Context
- logger *zap.SugaredLogger
vpodLister scheduler.VPodLister
capacity int32
schedulerPolicy scheduler.SchedulerPolicyType
@@ -166,11 +161,9 @@ type stateBuilder struct {
}
// NewStateBuilder returns a StateAccessor recreating the state from scratch each time it is requested
-func NewStateBuilder(ctx context.Context, namespace, sfsname string, lister scheduler.VPodLister, podCapacity int32, schedulerPolicy scheduler.SchedulerPolicyType, schedPolicy *scheduler.SchedulerPolicy, deschedPolicy *scheduler.SchedulerPolicy, podlister corev1.PodNamespaceLister, nodeLister corev1.NodeLister, statefulSetCache *scheduler.ScaleCache) StateAccessor {
+func NewStateBuilder(sfsname string, lister scheduler.VPodLister, podCapacity int32, schedulerPolicy scheduler.SchedulerPolicyType, schedPolicy, deschedPolicy *scheduler.SchedulerPolicy, podlister corev1.PodNamespaceLister, nodeLister corev1.NodeLister, statefulSetCache *scheduler.ScaleCache) StateAccessor {
return &stateBuilder{
- ctx: ctx,
- logger: logging.FromContext(ctx),
vpodLister: lister,
capacity: podCapacity,
schedulerPolicy: schedulerPolicy,
@@ -183,15 +176,18 @@ func NewStateBuilder(ctx context.Context, namespace, sfsname string, lister sche
}
}
-func (s *stateBuilder) State(reserved map[types.NamespacedName]map[string]int32) (*State, error) {
+func (s *stateBuilder) State(ctx context.Context, reserved map[types.NamespacedName]map[string]int32) (*State, error) {
vpods, err := s.vpodLister()
if err != nil {
return nil, err
}
- scale, err := s.statefulSetCache.GetScale(s.ctx, s.statefulSetName, metav1.GetOptions{})
+ logger := logging.FromContext(ctx).With("subcomponent", "statebuilder")
+ ctx = logging.WithLogger(ctx, logger)
+
+ scale, err := s.statefulSetCache.GetScale(ctx, s.statefulSetName, metav1.GetOptions{})
if err != nil {
- s.logger.Infow("failed to get statefulset", zap.Error(err))
+ logger.Infow("failed to get statefulset", zap.Error(err))
return nil, err
}
@@ -235,36 +231,35 @@ func (s *stateBuilder) State(reserved map[types.NamespacedName]map[string]int32)
}
for podId := int32(0); podId < scale.Spec.Replicas && s.podLister != nil; podId++ {
- var pod *v1.Pod
- wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
- pod, err = s.podLister.Get(PodNameFromOrdinal(s.statefulSetName, podId))
- return err == nil, nil
- })
-
- if pod != nil {
- if isPodUnschedulable(pod) {
- // Pod is marked for eviction - CANNOT SCHEDULE VREPS on this pod.
- continue
- }
-
- node, err := s.nodeLister.Get(pod.Spec.NodeName)
- if err != nil {
- return nil, err
- }
+ pod, err := s.podLister.Get(PodNameFromOrdinal(s.statefulSetName, podId))
+ if err != nil {
+ logger.Warnw("Failed to get pod", zap.Int32("ordinal", podId), zap.Error(err))
+ continue
+ }
+ if isPodUnschedulable(pod) {
+ // Pod is marked for eviction - CANNOT SCHEDULE VREPS on this pod.
+ logger.Debugw("Pod is unschedulable", zap.Any("pod", pod))
+ continue
+ }
- if isNodeUnschedulable(node) {
- // Node is marked as Unschedulable - CANNOT SCHEDULE VREPS on a pod running on this node.
- continue
- }
+ node, err := s.nodeLister.Get(pod.Spec.NodeName)
+ if err != nil {
+ return nil, err
+ }
- // Pod has no annotation or not annotated as unschedulable and
- // not on an unschedulable node, so add to feasible
- schedulablePods.Insert(podId)
+ if isNodeUnschedulable(node) {
+ // Node is marked as Unschedulable - CANNOT SCHEDULE VREPS on a pod running on this node.
+ logger.Debugw("Pod is on an unschedulable node", zap.Any("pod", node))
+ continue
}
+
+ // Pod has no annotation or not annotated as unschedulable and
+ // not on an unschedulable node, so add to feasible
+ schedulablePods.Insert(podId)
}
for _, p := range schedulablePods.List() {
- free, last = s.updateFreeCapacity(free, last, PodNameFromOrdinal(s.statefulSetName, p), 0)
+ free, last = s.updateFreeCapacity(logger, free, last, PodNameFromOrdinal(s.statefulSetName, p), 0)
}
// Getting current state from existing placements for all vpods
@@ -286,15 +281,14 @@ func (s *stateBuilder) State(reserved map[types.NamespacedName]map[string]int32)
// Account for reserved vreplicas
vreplicas = withReserved(vpod.GetKey(), podName, vreplicas, reserved)
- free, last = s.updateFreeCapacity(free, last, podName, vreplicas)
+ free, last = s.updateFreeCapacity(logger, free, last, podName, vreplicas)
withPlacement[vpod.GetKey()][podName] = true
- var pod *v1.Pod
- wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
- pod, err = s.podLister.Get(podName)
- return err == nil, nil
- })
+ pod, err := s.podLister.Get(podName)
+ if err != nil {
+ logger.Warnw("Failed to get pod", zap.String("podName", podName), zap.Error(err))
+ }
if pod != nil && schedulablePods.Has(OrdinalFromPodName(pod.GetName())) {
nodeName := pod.Spec.NodeName //node name for this pod
@@ -315,11 +309,10 @@ func (s *stateBuilder) State(reserved map[types.NamespacedName]map[string]int32)
continue
}
- var pod *v1.Pod
- wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
- pod, err = s.podLister.Get(podName)
- return err == nil, nil
- })
+ pod, err := s.podLister.Get(podName)
+ if err != nil {
+ logger.Warnw("Failed to get pod", zap.String("podName", podName), zap.Error(err))
+ }
if pod != nil && schedulablePods.Has(OrdinalFromPodName(pod.GetName())) {
nodeName := pod.Spec.NodeName //node name for this pod
@@ -330,7 +323,7 @@ func (s *stateBuilder) State(reserved map[types.NamespacedName]map[string]int32)
}
}
- free, last = s.updateFreeCapacity(free, last, podName, rvreplicas)
+ free, last = s.updateFreeCapacity(logger, free, last, podName, rvreplicas)
}
}
@@ -338,7 +331,7 @@ func (s *stateBuilder) State(reserved map[types.NamespacedName]map[string]int32)
SchedulerPolicy: s.schedulerPolicy, SchedPolicy: s.schedPolicy, DeschedPolicy: s.deschedPolicy, NodeToZoneMap: nodeToZoneMap, StatefulSetName: s.statefulSetName, PodLister: s.podLister,
PodSpread: podSpread, NodeSpread: nodeSpread, ZoneSpread: zoneSpread, Pending: pending, ExpectedVReplicaByVPod: expectedVReplicasByVPod}
- s.logger.Infow("cluster state info", zap.Any("state", state), zap.Any("reserved", toJSONable(reserved)))
+ logger.Infow("cluster state info", zap.Any("state", state), zap.Any("reserved", toJSONable(reserved)))
return state, nil
}
@@ -350,7 +343,7 @@ func pendingFromVPod(vpod scheduler.VPod) int32 {
return int32(math.Max(float64(0), float64(expected-scheduled)))
}
-func (s *stateBuilder) updateFreeCapacity(free []int32, last int32, podName string, vreplicas int32) ([]int32, int32) {
+func (s *stateBuilder) updateFreeCapacity(logger *zap.SugaredLogger, free []int32, last int32, podName string, vreplicas int32) ([]int32, int32) {
ordinal := OrdinalFromPodName(podName)
free = grow(free, ordinal, s.capacity)
@@ -359,7 +352,7 @@ func (s *stateBuilder) updateFreeCapacity(free []int32, last int32, podName stri
// Assert the pod is not overcommitted
if free[ordinal] < 0 {
// This should not happen anymore. Log as an error but do not interrupt the current scheduling.
- s.logger.Warnw("pod is overcommitted", zap.String("podName", podName), zap.Int32("free", free[ordinal]))
+ logger.Warnw("pod is overcommitted", zap.String("podName", podName), zap.Int32("free", free[ordinal]))
}
if ordinal > last {
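
With the stored context and logger gone, callers now pass a context on every State call and NewStateBuilder takes only the configuration it needs; the logger is derived from that context. A sketch of the updated call shape, under the assumption that the reconciler already owns the listers and scale cache; the statefulset name, capacity, and MAXFILLUP policy below are illustrative values only.

```go
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	corev1 "k8s.io/client-go/listers/core/v1"

	"knative.dev/eventing/pkg/scheduler"
	st "knative.dev/eventing/pkg/scheduler/state"
)

// buildState shows the new call shape only; listers, scale cache and the
// reserved map are placeholders for what the calling reconciler already has.
func buildState(
	ctx context.Context,
	vpodLister scheduler.VPodLister,
	podLister corev1.PodNamespaceLister,
	nodeLister corev1.NodeLister,
	scaleCache *scheduler.ScaleCache,
	reserved map[types.NamespacedName]map[string]int32,
) (*st.State, error) {
	accessor := st.NewStateBuilder(
		"kafka-source-dispatcher", // statefulset name (illustrative)
		vpodLister,
		20, // pod capacity (illustrative)
		scheduler.MAXFILLUP,
		nil, nil, // schedPolicy, deschedPolicy
		podLister,
		nodeLister,
		scaleCache,
	)
	// The logger now travels in ctx; State derives its own sub-logger from it.
	return accessor.State(ctx, reserved)
}
```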
diff --git a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go
index 296feb16f2..3245dabc16 100644
--- a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go
+++ b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go
@@ -18,6 +18,7 @@ package statefulset
import (
"context"
+ "fmt"
"math"
"sync"
"sync/atomic"
@@ -27,10 +28,8 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/wait"
- "knative.dev/pkg/reconciler"
-
"knative.dev/pkg/logging"
+ "knative.dev/pkg/reconciler"
"knative.dev/eventing/pkg/scheduler"
st "knative.dev/eventing/pkg/scheduler/state"
@@ -58,9 +57,8 @@ type autoscaler struct {
statefulSetCache *scheduler.ScaleCache
statefulSetName string
vpodLister scheduler.VPodLister
- logger *zap.SugaredLogger
stateAccessor st.StateAccessor
- trigger chan struct{}
+ trigger chan context.Context
evictor scheduler.Evictor
// capacity is the total number of virtual replicas available per pod.
@@ -68,7 +66,9 @@ type autoscaler struct {
// refreshPeriod is how often the autoscaler tries to scale down the statefulset
refreshPeriod time.Duration
- lock sync.Locker
+ // retryPeriod is how often the autoscaler retry failed autoscale operations
+ retryPeriod time.Duration
+ lock sync.Locker
// isLeader signals whether a given autoscaler instance is leader or not.
// The autoscaler is considered the leader when ephemeralLeaderElectionObject is in a
@@ -104,17 +104,17 @@ func (a *autoscaler) Demote(b reconciler.Bucket) {
}
}
-func newAutoscaler(ctx context.Context, cfg *Config, stateAccessor st.StateAccessor, statefulSetCache *scheduler.ScaleCache) *autoscaler {
- return &autoscaler{
- logger: logging.FromContext(ctx).With(zap.String("component", "autoscaler")),
+func newAutoscaler(cfg *Config, stateAccessor st.StateAccessor, statefulSetCache *scheduler.ScaleCache) *autoscaler {
+ a := &autoscaler{
statefulSetCache: statefulSetCache,
statefulSetName: cfg.StatefulSetName,
vpodLister: cfg.VPodLister,
stateAccessor: stateAccessor,
evictor: cfg.Evictor,
- trigger: make(chan struct{}, 1),
+ trigger: make(chan context.Context, 1),
capacity: cfg.PodCapacity,
refreshPeriod: cfg.RefreshPeriod,
+ retryPeriod: cfg.RetryPeriod,
lock: new(sync.Mutex),
isLeader: atomic.Bool{},
getReserved: cfg.getReserved,
@@ -124,25 +124,38 @@ func newAutoscaler(ctx context.Context, cfg *Config, stateAccessor st.StateAcces
Add(-cfg.RefreshPeriod).
Add(-time.Minute),
}
+
+ if a.retryPeriod == 0 {
+ a.retryPeriod = time.Second
+ }
+
+ return a
}
func (a *autoscaler) Start(ctx context.Context) {
attemptScaleDown := false
for {
+ autoscaleCtx := ctx
select {
case <-ctx.Done():
return
case <-time.After(a.refreshPeriod):
- a.logger.Infow("Triggering scale down", zap.Bool("isLeader", a.isLeader.Load()))
+ logging.FromContext(ctx).Infow("Triggering scale down", zap.Bool("isLeader", a.isLeader.Load()))
attemptScaleDown = true
- case <-a.trigger:
- a.logger.Infow("Triggering scale up", zap.Bool("isLeader", a.isLeader.Load()))
+ case autoscaleCtx = <-a.trigger:
+ logging.FromContext(autoscaleCtx).Infow("Triggering scale up", zap.Bool("isLeader", a.isLeader.Load()))
attemptScaleDown = false
}
// Retry a few times, just so that we don't have to wait for the next beat when
// a transient error occurs
- a.syncAutoscale(ctx, attemptScaleDown)
+ if err := a.syncAutoscale(autoscaleCtx, attemptScaleDown); err != nil {
+ logging.FromContext(autoscaleCtx).Errorw("Failed to sync autoscale", zap.Error(err))
+ go func() {
+ time.Sleep(a.retryPeriod)
+ a.Autoscale(ctx) // Use top-level context for background retries
+ }()
+ }
}
}
@@ -150,10 +163,10 @@ func (a *autoscaler) Autoscale(ctx context.Context) {
select {
// We trigger the autoscaler asynchronously by using the channel so that the scale down refresh
// period is reset.
- case a.trigger <- struct{}{}:
+ case a.trigger <- ctx:
default:
// We don't want to block if the channel's buffer is full, it will be triggered eventually.
-
+ logging.FromContext(ctx).Debugw("Skipping autoscale since autoscale is in progress")
}
}
@@ -161,36 +174,34 @@ func (a *autoscaler) syncAutoscale(ctx context.Context, attemptScaleDown bool) e
a.lock.Lock()
defer a.lock.Unlock()
- var lastErr error
- wait.Poll(500*time.Millisecond, 5*time.Second, func() (bool, error) {
- err := a.doautoscale(ctx, attemptScaleDown)
- if err != nil {
- logging.FromContext(ctx).Errorw("Failed to autoscale", zap.Error(err))
- }
- lastErr = err
- return err == nil, nil
- })
- return lastErr
+ if err := a.doautoscale(ctx, attemptScaleDown); err != nil {
+ return fmt.Errorf("failed to do autoscale: %w", err)
+ }
+ return nil
}
func (a *autoscaler) doautoscale(ctx context.Context, attemptScaleDown bool) error {
if !a.isLeader.Load() {
return nil
}
- state, err := a.stateAccessor.State(a.getReserved())
+
+ logger := logging.FromContext(ctx).With("component", "autoscaler")
+ ctx = logging.WithLogger(ctx, logger)
+
+ state, err := a.stateAccessor.State(ctx, a.getReserved())
if err != nil {
- a.logger.Info("error while refreshing scheduler state (will retry)", zap.Error(err))
+ logger.Info("error while refreshing scheduler state (will retry)", zap.Error(err))
return err
}
scale, err := a.statefulSetCache.GetScale(ctx, a.statefulSetName, metav1.GetOptions{})
if err != nil {
// skip a beat
- a.logger.Infow("failed to get scale subresource", zap.Error(err))
+ logger.Infow("failed to get scale subresource", zap.Error(err))
return err
}
- a.logger.Debugw("checking adapter capacity",
+ logger.Debugw("checking adapter capacity",
zap.Int32("replicas", scale.Spec.Replicas),
zap.Any("state", state))
@@ -234,43 +245,43 @@ func (a *autoscaler) doautoscale(ctx context.Context, attemptScaleDown bool) err
if newreplicas != scale.Spec.Replicas {
scale.Spec.Replicas = newreplicas
- a.logger.Infow("updating adapter replicas", zap.Int32("replicas", scale.Spec.Replicas))
+ logger.Infow("updating adapter replicas", zap.Int32("replicas", scale.Spec.Replicas))
_, err = a.statefulSetCache.UpdateScale(ctx, a.statefulSetName, scale, metav1.UpdateOptions{})
if err != nil {
- a.logger.Errorw("updating scale subresource failed", zap.Error(err))
+ logger.Errorw("updating scale subresource failed", zap.Error(err))
return err
}
} else if attemptScaleDown {
// since the number of replicas hasn't changed and time has approached to scale down,
// take the opportunity to compact the vreplicas
- a.mayCompact(state, scaleUpFactor)
+ return a.mayCompact(logger, state, scaleUpFactor)
}
return nil
}
-func (a *autoscaler) mayCompact(s *st.State, scaleUpFactor int32) {
+func (a *autoscaler) mayCompact(logger *zap.SugaredLogger, s *st.State, scaleUpFactor int32) error {
// This avoids a too aggressive scale down by adding a "grace period" based on the refresh
// period
nextAttempt := a.lastCompactAttempt.Add(a.refreshPeriod)
if time.Now().Before(nextAttempt) {
- a.logger.Debugw("Compact was retried before refresh period",
+ logger.Debugw("Compact was retried before refresh period",
zap.Time("lastCompactAttempt", a.lastCompactAttempt),
zap.Time("nextAttempt", nextAttempt),
zap.String("refreshPeriod", a.refreshPeriod.String()),
)
- return
+ return nil
}
- a.logger.Debugw("Trying to compact and scale down",
+ logger.Debugw("Trying to compact and scale down",
zap.Int32("scaleUpFactor", scaleUpFactor),
zap.Any("state", s),
)
// when there is only one pod there is nothing to move or number of pods is just enough!
if s.LastOrdinal < 1 || len(s.SchedulablePods) <= int(scaleUpFactor) {
- return
+ return nil
}
if s.SchedulerPolicy == scheduler.MAXFILLUP {
@@ -283,7 +294,7 @@ func (a *autoscaler) mayCompact(s *st.State, scaleUpFactor int32) {
a.lastCompactAttempt = time.Now()
err := a.compact(s, scaleUpFactor)
if err != nil {
- a.logger.Errorw("vreplicas compaction failed", zap.Error(err))
+ return fmt.Errorf("vreplicas compaction failed (scaleUpFactor %d): %w", scaleUpFactor, err)
}
}
@@ -303,10 +314,11 @@ func (a *autoscaler) mayCompact(s *st.State, scaleUpFactor int32) {
a.lastCompactAttempt = time.Now()
err := a.compact(s, scaleUpFactor)
if err != nil {
- a.logger.Errorw("vreplicas compaction failed", zap.Error(err))
+ return fmt.Errorf("vreplicas compaction failed (scaleUpFactor %d): %w", scaleUpFactor, err)
}
}
}
+ return nil
}
func (a *autoscaler) compact(s *st.State, scaleUpFactor int32) error {
@@ -323,16 +335,14 @@ func (a *autoscaler) compact(s *st.State, scaleUpFactor int32) error {
ordinal := st.OrdinalFromPodName(placements[i].PodName)
if ordinal == s.LastOrdinal-j {
- wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
- if s.PodLister != nil {
- pod, err = s.PodLister.Get(placements[i].PodName)
- }
- return err == nil, nil
- })
+ pod, err = s.PodLister.Get(placements[i].PodName)
+ if err != nil {
+ return fmt.Errorf("failed to get pod %s: %w", placements[i].PodName, err)
+ }
err = a.evictor(pod, vpod, &placements[i])
if err != nil {
- return err
+ return fmt.Errorf("failed to evict pod %s: %w", pod.Name, err)
}
}
}
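
The autoscaler hunks above replace the in-place `wait.Poll` retry with a single attempt per trigger: a failed `syncAutoscale` schedules one delayed re-trigger through the `trigger` channel, which now also carries the caller's context so request-scoped loggers survive the hop. A minimal, self-contained sketch of that pattern, assuming nothing beyond the standard library (the `retrier` and `doWork` names are illustrative, not from the patch):

```go
// Minimal sketch of the retry-via-trigger pattern used above.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type retrier struct {
	trigger     chan context.Context
	retryPeriod time.Duration
}

// Autoscale mirrors the non-blocking trigger: if a run is already pending,
// the send is skipped and the queued run will pick up the work.
func (r *retrier) Autoscale(ctx context.Context) {
	select {
	case r.trigger <- ctx:
	default:
	}
}

func (r *retrier) Start(ctx context.Context, doWork func(context.Context) error) {
	for {
		workCtx := ctx
		select {
		case <-ctx.Done():
			return
		case workCtx = <-r.trigger:
		}
		if err := doWork(workCtx); err != nil {
			// Instead of polling in place, schedule a single delayed re-trigger.
			go func() {
				time.Sleep(r.retryPeriod)
				r.Autoscale(ctx)
			}()
		}
	}
}

func main() {
	r := &retrier{trigger: make(chan context.Context, 1), retryPeriod: time.Second}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	attempts := 0
	go r.Start(ctx, func(context.Context) error {
		attempts++
		if attempts < 2 {
			return errors.New("transient error")
		}
		fmt.Println("autoscale succeeded on attempt", attempts)
		return nil
	})
	r.Autoscale(ctx)
	<-ctx.Done()
}
```
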
diff --git a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go
index 1256e2769d..6995d6ff45 100644
--- a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go
+++ b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go
@@ -33,11 +33,11 @@ import (
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/integer"
+ "knative.dev/pkg/logging"
"knative.dev/pkg/reconciler"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/controller"
- "knative.dev/pkg/logging"
duckv1alpha1 "knative.dev/eventing/pkg/apis/duck/v1alpha1"
"knative.dev/eventing/pkg/scheduler"
@@ -67,6 +67,8 @@ type Config struct {
PodCapacity int32 `json:"podCapacity"`
// Autoscaler refresh period
RefreshPeriod time.Duration `json:"refreshPeriod"`
+ // Autoscaler retry period
+ RetryPeriod time.Duration `json:"retryPeriod"`
SchedulerPolicy scheduler.SchedulerPolicyType `json:"schedulerPolicy"`
SchedPolicy *scheduler.SchedulerPolicy `json:"schedPolicy"`
@@ -91,14 +93,14 @@ func New(ctx context.Context, cfg *Config) (scheduler.Scheduler, error) {
scaleCache := scheduler.NewScaleCache(ctx, cfg.StatefulSetNamespace, kubeclient.Get(ctx).AppsV1().StatefulSets(cfg.StatefulSetNamespace), cfg.ScaleCacheConfig)
- stateAccessor := st.NewStateBuilder(ctx, cfg.StatefulSetNamespace, cfg.StatefulSetName, cfg.VPodLister, cfg.PodCapacity, cfg.SchedulerPolicy, cfg.SchedPolicy, cfg.DeschedPolicy, cfg.PodLister, cfg.NodeLister, scaleCache)
+ stateAccessor := st.NewStateBuilder(cfg.StatefulSetName, cfg.VPodLister, cfg.PodCapacity, cfg.SchedulerPolicy, cfg.SchedPolicy, cfg.DeschedPolicy, cfg.PodLister, cfg.NodeLister, scaleCache)
var getReserved GetReserved
cfg.getReserved = func() map[types.NamespacedName]map[string]int32 {
return getReserved()
}
- autoscaler := newAutoscaler(ctx, cfg, stateAccessor, scaleCache)
+ autoscaler := newAutoscaler(cfg, stateAccessor, scaleCache)
var wg sync.WaitGroup
wg.Add(1)
@@ -126,8 +128,6 @@ func (p Pending) Total() int32 {
// StatefulSetScheduler is a scheduler placing VPod into statefulset-managed set of pods
type StatefulSetScheduler struct {
- ctx context.Context
- logger *zap.SugaredLogger
statefulSetName string
statefulSetNamespace string
statefulSetClient clientappsv1.StatefulSetInterface
@@ -171,8 +171,6 @@ func newStatefulSetScheduler(ctx context.Context,
autoscaler Autoscaler) *StatefulSetScheduler {
scheduler := &StatefulSetScheduler{
- ctx: ctx,
- logger: logging.FromContext(ctx),
statefulSetNamespace: cfg.StatefulSetNamespace,
statefulSetName: cfg.StatefulSetName,
statefulSetClient: kubeclient.Get(ctx).AppsV1().StatefulSets(cfg.StatefulSetNamespace),
@@ -193,7 +191,9 @@ func newStatefulSetScheduler(ctx context.Context,
sif.Apps().V1().StatefulSets().Informer().
AddEventHandler(cache.FilteringResourceEventHandler{
FilterFunc: controller.FilterWithNameAndNamespace(cfg.StatefulSetNamespace, cfg.StatefulSetName),
- Handler: controller.HandleAll(scheduler.updateStatefulset),
+ Handler: controller.HandleAll(func(i interface{}) {
+ scheduler.updateStatefulset(ctx, i)
+ }),
})
sif.Start(ctx.Done())
@@ -207,13 +207,13 @@ func newStatefulSetScheduler(ctx context.Context,
return scheduler
}
-func (s *StatefulSetScheduler) Schedule(vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) {
+func (s *StatefulSetScheduler) Schedule(ctx context.Context, vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) {
s.lock.Lock()
defer s.lock.Unlock()
s.reservedMu.Lock()
defer s.reservedMu.Unlock()
- placements, err := s.scheduleVPod(vpod)
+ placements, err := s.scheduleVPod(ctx, vpod)
if placements == nil {
return placements, err
}
@@ -228,11 +228,13 @@ func (s *StatefulSetScheduler) Schedule(vpod scheduler.VPod) ([]duckv1alpha1.Pla
return placements, err
}
-func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) {
- logger := s.logger.With("key", vpod.GetKey(), zap.String("component", "scheduler"))
+func (s *StatefulSetScheduler) scheduleVPod(ctx context.Context, vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) {
+ logger := logging.FromContext(ctx).With("key", vpod.GetKey(), zap.String("component", "scheduler"))
+ ctx = logging.WithLogger(ctx, logger)
+
// Get the current placements state
// Quite an expensive operation but safe and simple.
- state, err := s.stateAccessor.State(s.reserved)
+ state, err := s.stateAccessor.State(ctx, s.reserved)
if err != nil {
logger.Debug("error while refreshing scheduler state (will retry)", zap.Error(err))
return nil, err
@@ -270,13 +272,15 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
}
// Handle overcommitted pods.
- if state.FreeCap[ordinal] < 0 {
+ if state.Free(ordinal) < 0 {
// vr > free => vr: 9, overcommit 4 -> free: 0, vr: 5, pending: +4
// vr = free => vr: 4, overcommit 4 -> free: 0, vr: 0, pending: +4
// vr < free => vr: 3, overcommit 4 -> free: -1, vr: 0, pending: +3
overcommit := -state.FreeCap[ordinal]
+ logger.Debugw("overcommit", zap.Any("overcommit", overcommit), zap.Any("placement", p))
+
if p.VReplicas >= overcommit {
state.SetFree(ordinal, 0)
state.Pending[vpod.GetKey()] += overcommit
@@ -313,7 +317,9 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
if state.SchedulerPolicy != "" {
// Need less => scale down
if tr > vpod.GetVReplicas() {
- logger.Debugw("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
+ logger.Debugw("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()),
+ zap.Any("placements", placements),
+ zap.Any("existingPlacements", existingPlacements))
placements = s.removeReplicas(tr-vpod.GetVReplicas(), placements)
@@ -323,15 +329,19 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
}
// Need more => scale up
- logger.Debugw("scaling up", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
+ logger.Debugw("scaling up", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()),
+ zap.Any("placements", placements),
+ zap.Any("existingPlacements", existingPlacements))
placements, left = s.addReplicas(state, vpod.GetVReplicas()-tr, placements)
} else { //Predicates and priorities must be used for scheduling
// Need less => scale down
if tr > vpod.GetVReplicas() && state.DeschedPolicy != nil {
- logger.Infow("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
- placements = s.removeReplicasWithPolicy(vpod, tr-vpod.GetVReplicas(), placements)
+ logger.Infow("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()),
+ zap.Any("placements", placements),
+ zap.Any("existingPlacements", existingPlacements))
+ placements = s.removeReplicasWithPolicy(ctx, vpod, tr-vpod.GetVReplicas(), placements)
// Do not trigger the autoscaler to avoid unnecessary churn
@@ -343,8 +353,10 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
// Need more => scale up
// rebalancing needed for all vreps most likely since there are pending vreps from previous reconciliation
// can fall here when vreps scaled up or after eviction
- logger.Infow("scaling up with a rebalance (if needed)", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
- placements, left = s.rebalanceReplicasWithPolicy(vpod, vpod.GetVReplicas(), placements)
+ logger.Infow("scaling up with a rebalance (if needed)", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()),
+ zap.Any("placements", placements),
+ zap.Any("existingPlacements", existingPlacements))
+ placements, left = s.rebalanceReplicasWithPolicy(ctx, vpod, vpod.GetVReplicas(), placements)
}
}
@@ -355,10 +367,10 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
// Trigger the autoscaler
if s.autoscaler != nil {
logger.Infow("Awaiting autoscaler", zap.Any("placement", placements), zap.Int32("left", left))
- s.autoscaler.Autoscale(s.ctx)
+ s.autoscaler.Autoscale(ctx)
}
- if state.SchedPolicy != nil {
+ if state.SchedulerPolicy == "" && state.SchedPolicy != nil {
logger.Info("reverting to previous placements")
s.reservePlacements(vpod, existingPlacements) // rebalancing doesn't care about new placements since all vreps will be re-placed
return existingPlacements, s.notEnoughPodReplicas(left) // requeue to wait for the autoscaler to do its job
@@ -380,25 +392,25 @@ func toJSONable(pending map[types.NamespacedName]int32) map[string]int32 {
return r
}
-func (s *StatefulSetScheduler) rebalanceReplicasWithPolicy(vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) {
+func (s *StatefulSetScheduler) rebalanceReplicasWithPolicy(ctx context.Context, vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) {
s.makeZeroPlacements(vpod, placements)
- placements, diff = s.addReplicasWithPolicy(vpod, diff, make([]duckv1alpha1.Placement, 0)) //start fresh with a new placements list
+ placements, diff = s.addReplicasWithPolicy(ctx, vpod, diff, make([]duckv1alpha1.Placement, 0)) //start fresh with a new placements list
return placements, diff
}
-func (s *StatefulSetScheduler) removeReplicasWithPolicy(vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) []duckv1alpha1.Placement {
- logger := s.logger.Named("remove replicas with policy")
+func (s *StatefulSetScheduler) removeReplicasWithPolicy(ctx context.Context, vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) []duckv1alpha1.Placement {
+ logger := logging.FromContext(ctx).Named("remove replicas with policy")
numVreps := diff
for i := int32(0); i < numVreps; i++ { //deschedule one vreplica at a time
- state, err := s.stateAccessor.State(s.reserved)
+ state, err := s.stateAccessor.State(ctx, s.reserved)
if err != nil {
logger.Info("error while refreshing scheduler state (will retry)", zap.Error(err))
return placements
}
- feasiblePods := s.findFeasiblePods(s.ctx, state, vpod, state.DeschedPolicy)
+ feasiblePods := s.findFeasiblePods(ctx, state, vpod, state.DeschedPolicy)
feasiblePods = s.removePodsNotInPlacement(vpod, feasiblePods)
if len(feasiblePods) == 1 { //nothing to score, remove vrep from that pod
placementPodID := feasiblePods[0]
@@ -409,7 +421,7 @@ func (s *StatefulSetScheduler) removeReplicasWithPolicy(vpod scheduler.VPod, dif
continue
}
- priorityList, err := s.prioritizePods(s.ctx, state, vpod, feasiblePods, state.DeschedPolicy)
+ priorityList, err := s.prioritizePods(ctx, state, vpod, feasiblePods, state.DeschedPolicy)
if err != nil {
logger.Info("error while scoring pods using priorities", zap.Error(err))
s.reservePlacements(vpod, placements)
@@ -455,13 +467,13 @@ func (s *StatefulSetScheduler) removeSelectionFromPlacements(placementPodID int3
return newPlacements
}
-func (s *StatefulSetScheduler) addReplicasWithPolicy(vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) {
- logger := s.logger.Named("add replicas with policy")
+func (s *StatefulSetScheduler) addReplicasWithPolicy(ctx context.Context, vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) {
+ logger := logging.FromContext(ctx).Named("add replicas with policy")
numVreps := diff
for i := int32(0); i < numVreps; i++ { //schedule one vreplica at a time (find most suitable pod placement satisying predicates with high score)
// Get the current placements state
- state, err := s.stateAccessor.State(s.reserved)
+ state, err := s.stateAccessor.State(ctx, s.reserved)
if err != nil {
logger.Info("error while refreshing scheduler state (will retry)", zap.Error(err))
return placements, diff
@@ -474,7 +486,7 @@ func (s *StatefulSetScheduler) addReplicasWithPolicy(vpod scheduler.VPod, diff i
break //end the iteration for all vreps since there are not pods
}
- feasiblePods := s.findFeasiblePods(s.ctx, state, vpod, state.SchedPolicy)
+ feasiblePods := s.findFeasiblePods(ctx, state, vpod, state.SchedPolicy)
if len(feasiblePods) == 0 { //no pods available to schedule this vreplica
logger.Info("no feasible pods available to schedule this vreplica")
s.reservePlacements(vpod, placements)
@@ -492,7 +504,7 @@ func (s *StatefulSetScheduler) addReplicasWithPolicy(vpod scheduler.VPod, diff i
continue
} */
- priorityList, err := s.prioritizePods(s.ctx, state, vpod, feasiblePods, state.SchedPolicy)
+ priorityList, err := s.prioritizePods(ctx, state, vpod, feasiblePods, state.SchedPolicy)
if err != nil {
logger.Info("error while scoring pods using priorities", zap.Error(err))
s.reservePlacements(vpod, placements)
@@ -567,7 +579,7 @@ func (s *StatefulSetScheduler) removePodsNotInPlacement(vpod scheduler.VPod, fea
// prioritizePods prioritizes the pods by running the score plugins, which return a score for each pod.
// The scores from each plugin are added together to make the score for that pod.
func (s *StatefulSetScheduler) prioritizePods(ctx context.Context, states *st.State, vpod scheduler.VPod, feasiblePods []int32, policy *scheduler.SchedulerPolicy) (st.PodScoreList, error) {
- logger := s.logger.Named("prioritize all feasible pods")
+ logger := logging.FromContext(ctx).Named("prioritize all feasible pods")
// If no priority configs are provided, then all pods will have a score of one
result := make(st.PodScoreList, 0, len(feasiblePods))
@@ -630,7 +642,7 @@ func (s *StatefulSetScheduler) selectPod(podScoreList st.PodScoreList) (int32, e
// If any of these plugins doesn't return "Success", the pod is not suitable for placing the vrep.
// Meanwhile, the failure message and status are set for the given pod.
func (s *StatefulSetScheduler) RunFilterPlugins(ctx context.Context, states *st.State, vpod scheduler.VPod, podID int32, policy *scheduler.SchedulerPolicy) st.PluginToStatus {
- logger := s.logger.Named("run all filter plugins")
+ logger := logging.FromContext(ctx).Named("run all filter plugins")
statuses := make(st.PluginToStatus)
for _, plugin := range policy.Predicates {
@@ -663,7 +675,7 @@ func (s *StatefulSetScheduler) runFilterPlugin(ctx context.Context, pl st.Filter
// RunScorePlugins runs the set of configured scoring plugins. It returns a list that stores for each scoring plugin name the corresponding PodScoreList(s).
// It also returns *Status, which is set to non-success if any of the plugins returns a non-success status.
func (s *StatefulSetScheduler) RunScorePlugins(ctx context.Context, states *st.State, vpod scheduler.VPod, feasiblePods []int32, policy *scheduler.SchedulerPolicy) (st.PluginToPodScores, *st.Status) {
- logger := s.logger.Named("run all score plugins")
+ logger := logging.FromContext(ctx).Named("run all score plugins")
pluginToPodScores := make(st.PluginToPodScores, len(policy.Priorities))
for _, plugin := range policy.Priorities {
@@ -776,10 +788,11 @@ func (s *StatefulSetScheduler) addReplicas(states *st.State, diff int32, placeme
return newPlacements, diff
}
-func (s *StatefulSetScheduler) updateStatefulset(obj interface{}) {
+func (s *StatefulSetScheduler) updateStatefulset(ctx context.Context, obj interface{}) {
statefulset, ok := obj.(*appsv1.StatefulSet)
if !ok {
- s.logger.Fatalw("expected a Statefulset object", zap.Any("object", obj))
+ logging.FromContext(ctx).Warnw("expected a Statefulset object", zap.Any("object", obj))
+ return
}
s.lock.Lock()
@@ -789,7 +802,7 @@ func (s *StatefulSetScheduler) updateStatefulset(obj interface{}) {
s.replicas = 1
} else if s.replicas != *statefulset.Spec.Replicas {
s.replicas = *statefulset.Spec.Replicas
- s.logger.Infow("statefulset replicas updated", zap.Int32("replicas", s.replicas))
+ logging.FromContext(ctx).Infow("statefulset replicas updated", zap.Int32("replicas", s.replicas))
}
}
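
With the stored `ctx` and `logger` fields gone from `StatefulSetScheduler`, each entry point now derives its logger from the incoming context and re-attaches the enriched logger before calling helpers, so every function on the call path logs with the same `key`/`component` fields. A small sketch of that pattern, assuming the `knative.dev/pkg/logging` helpers shown in the diff (the `scheduleOne`/`placeOne` names are illustrative):

```go
// Sketch of the context-scoped logger pattern: derive a logger from the
// incoming context, enrich it, and put it back so downstream helpers pick it up.
package sched

import (
	"context"

	"go.uber.org/zap"
	"knative.dev/pkg/logging"
)

func scheduleOne(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx).With("key", key, zap.String("component", "scheduler"))
	ctx = logging.WithLogger(ctx, logger)

	logger.Debugw("scheduling")
	return placeOne(ctx)
}

func placeOne(ctx context.Context) error {
	// No logger field on a struct: any function on the call path recovers the
	// same enriched logger from the context.
	logging.FromContext(ctx).Debugw("placing replicas")
	return nil
}
```
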
diff --git a/vendor/knative.dev/eventing/test/e2e-common.sh b/vendor/knative.dev/eventing/test/e2e-common.sh
index 73b87d3b63..1c78f80da8 100644
--- a/vendor/knative.dev/eventing/test/e2e-common.sh
+++ b/vendor/knative.dev/eventing/test/e2e-common.sh
@@ -187,6 +187,9 @@ function install_knative_eventing() {
UNINSTALL_LIST+=( "${EVENTING_RELEASE_YAML}" )
fi
+ # Workaround for https://github.com/knative/eventing/issues/8161
+ kubectl label namespace "${SYSTEM_NAMESPACE}" bindings.knative.dev/exclude=true --overwrite
+
# Setup config tracing for tracing tests
local TMP_CONFIG_TRACING_CONFIG=${TMP_DIR}/${CONFIG_TRACING_CONFIG##*/}
sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" "${CONFIG_TRACING_CONFIG}" > "${TMP_CONFIG_TRACING_CONFIG}"
diff --git a/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go b/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go
index dea2fe5fd4..3704da705b 100644
--- a/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go
+++ b/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go
@@ -27,6 +27,7 @@ import (
"github.com/cloudevents/sdk-go/v2/test"
"github.com/google/uuid"
"knative.dev/reconciler-test/pkg/environment"
+ "knative.dev/reconciler-test/pkg/state"
duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
@@ -382,8 +383,20 @@ Note: the number denotes the sequence of the event that flows in this test case.
*/
func brokerEventTransformationForTrigger() *feature.Feature {
f := feature.NewFeatureNamed("Broker event transformation for trigger")
+ config := BrokerEventTransformationForTriggerSetup(f)
+ BrokerEventTransformationForTriggerAssert(f, config)
+ return f
+}
- source := feature.MakeRandomK8sName("source")
+type brokerEventTransformationConfig struct {
+ Broker string
+ Sink1 string
+ Sink2 string
+ EventToSend cloudevents.Event
+ TransformedEvent cloudevents.Event
+}
+
+func BrokerEventTransformationForTriggerSetup(f *feature.Feature) brokerEventTransformationConfig {
sink1 := feature.MakeRandomK8sName("sink1")
sink2 := feature.MakeRandomK8sName("sink2")
@@ -391,42 +404,43 @@ func brokerEventTransformationForTrigger() *feature.Feature {
trigger2 := feature.MakeRandomK8sName("trigger2")
// Construct original cloudevent message
- eventType := "type1"
- eventSource := "http://source1.com"
- eventBody := `{"msg":"e2e-brokerchannel-body"}`
- // Construct cloudevent message after transformation
- transformedEventType := "type2"
- transformedEventSource := "http://source2.com"
- transformedBody := `{"msg":"transformed body"}`
-
- // Construct eventToSend
eventToSend := cloudevents.NewEvent()
- eventToSend.SetID(uuid.New().String())
- eventToSend.SetType(eventType)
- eventToSend.SetSource(eventSource)
- eventToSend.SetData(cloudevents.ApplicationJSON, []byte(eventBody))
+ eventToSend.SetType("type1")
+ eventToSend.SetSource("http://source1.com")
+ eventToSend.SetData(cloudevents.ApplicationJSON, []byte(`{"msg":"e2e-brokerchannel-body"}`))
+
+ // Construct cloudevent message after transformation
+ transformedEvent := cloudevents.NewEvent()
+ transformedEvent.SetType("type2")
+ transformedEvent.SetSource("http://source2.com")
+ transformedEvent.SetData(cloudevents.ApplicationJSON, []byte(`{"msg":"transformed body"}`))
//Install the broker
brokerName := feature.MakeRandomK8sName("broker")
+ f.Setup("Set context variables", func(ctx context.Context, t feature.T) {
+ state.SetOrFail(ctx, t, "brokerName", brokerName)
+ state.SetOrFail(ctx, t, "sink1", sink1)
+ state.SetOrFail(ctx, t, "sink2", sink2)
+ })
f.Setup("install broker", broker.Install(brokerName, broker.WithEnvConfig()...))
f.Setup("broker is ready", broker.IsReady(brokerName))
f.Setup("broker is addressable", broker.IsAddressable(brokerName))
f.Setup("install sink1", eventshub.Install(sink1,
- eventshub.ReplyWithTransformedEvent(transformedEventType, transformedEventSource, transformedBody),
+ eventshub.ReplyWithTransformedEvent(transformedEvent.Type(), transformedEvent.Source(), string(transformedEvent.Data())),
eventshub.StartReceiver),
)
f.Setup("install sink2", eventshub.Install(sink2, eventshub.StartReceiver))
// filter1 filters the original events
filter1 := eventingv1.TriggerFilterAttributes{
- "type": eventType,
- "source": eventSource,
+ "type": eventToSend.Type(),
+ "source": eventToSend.Source(),
}
// filter2 filters events after transformation
filter2 := eventingv1.TriggerFilterAttributes{
- "type": transformedEventType,
- "source": transformedEventSource,
+ "type": transformedEvent.Type(),
+ "source": transformedEvent.Source(),
}
// Install the trigger1 point to Broker and transform the original events to new events
@@ -446,33 +460,49 @@ func brokerEventTransformationForTrigger() *feature.Feature {
))
f.Setup("trigger2 goes ready", trigger.IsReady(trigger2))
+ return brokerEventTransformationConfig{
+ Broker: brokerName,
+ Sink1: sink1,
+ Sink2: sink2,
+ EventToSend: eventToSend,
+ TransformedEvent: transformedEvent,
+ }
+}
+
+func BrokerEventTransformationForTriggerAssert(f *feature.Feature,
+ cfg brokerEventTransformationConfig) {
+
+ source := feature.MakeRandomK8sName("source")
+
+ // Set a new ID every time we send the event to allow calling this function repeatedly
+ cfg.EventToSend.SetID(uuid.New().String())
f.Requirement("install source", eventshub.Install(
source,
- eventshub.StartSenderToResource(broker.GVR(), brokerName),
- eventshub.InputEvent(eventToSend),
+ eventshub.StartSenderToResource(broker.GVR(), cfg.Broker),
+ eventshub.InputEvent(cfg.EventToSend),
))
eventMatcher := eventasssert.MatchEvent(
- test.HasSource(eventSource),
- test.HasType(eventType),
- test.HasData([]byte(eventBody)),
+ test.HasId(cfg.EventToSend.ID()),
+ test.HasSource(cfg.EventToSend.Source()),
+ test.HasType(cfg.EventToSend.Type()),
+ test.HasData(cfg.EventToSend.Data()),
)
transformEventMatcher := eventasssert.MatchEvent(
- test.HasSource(transformedEventSource),
- test.HasType(transformedEventType),
- test.HasData([]byte(transformedBody)),
+ test.HasSource(cfg.TransformedEvent.Source()),
+ test.HasType(cfg.TransformedEvent.Type()),
+ test.HasData(cfg.TransformedEvent.Data()),
)
- f.Stable("Trigger2 has filtered all transformed events").
- Must("delivers original events",
- eventasssert.OnStore(sink2).Match(transformEventMatcher).AtLeast(1))
-
- f.Stable("Trigger2 has no original events").
- Must("delivers original events",
- eventasssert.OnStore(sink2).Match(eventMatcher).Not())
-
- return f
-
+ f.Stable("Trigger has filtered all transformed events").
+ Must("trigger 1 delivers original events",
+ eventasssert.OnStore(cfg.Sink1).Match(eventMatcher).AtLeast(1)).
+ Must("trigger 1 does not deliver transformed events",
+ eventasssert.OnStore(cfg.Sink1).Match(transformEventMatcher).Not()).
+ Must("trigger 2 delivers transformed events",
+ eventasssert.OnStore(cfg.Sink2).Match(transformEventMatcher).AtLeast(1)).
+ Must("trigger 2 does not deliver original events",
+ eventasssert.OnStore(cfg.Sink2).Match(eventMatcher).Not())
}
func BrokerPreferHeaderCheck() *feature.Feature {
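
Splitting the test into exported `...Setup` and `...Assert` halves lets callers install the broker, sinks, and triggers once and replay only the assertion phase, which is what the upgrade suite later in this patch does. A sketch of such a caller, under the assumption that it lives in a separate package (the `reusableBrokerFeature` name is illustrative):

```go
// Sketch of composing the exported Setup/Assert pair from another package.
package example

import (
	"knative.dev/reconciler-test/pkg/feature"

	brokerfeatures "knative.dev/eventing/test/rekt/features/broker"
)

func reusableBrokerFeature() (setup *feature.Feature, verify func() *feature.Feature) {
	setup = feature.NewFeature()
	cfg := brokerfeatures.BrokerEventTransformationForTriggerSetup(setup)

	// The returned config carries the broker, sinks, and events, so the
	// assertion phase can be replayed against the same resources.
	verify = func() *feature.Feature {
		f := feature.NewFeatureNamed(setup.Name)
		brokerfeatures.BrokerEventTransformationForTriggerAssert(f, cfg)
		return f
	}
	return setup, verify
}
```
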
diff --git a/vendor/knative.dev/eventing/test/rekt/features/channel/features.go b/vendor/knative.dev/eventing/test/rekt/features/channel/features.go
index bc7238b51e..da8a6e9bf5 100644
--- a/vendor/knative.dev/eventing/test/rekt/features/channel/features.go
+++ b/vendor/knative.dev/eventing/test/rekt/features/channel/features.go
@@ -47,8 +47,16 @@ import (
func ChannelChain(length int, createSubscriberFn func(ref *duckv1.KReference, uri string) manifest.CfgFn) *feature.Feature {
f := feature.NewFeature()
- sink := feature.MakeRandomK8sName("sink")
- cs := feature.MakeRandomK8sName("containersource")
+
+ sink, channel := ChannelChainSetup(f, length, createSubscriberFn)
+
+ ChannelChainAssert(f, sink, channel)
+
+ return f
+}
+
+func ChannelChainSetup(f *feature.Feature, length int, createSubscriberFn func(ref *duckv1.KReference, uri string) manifest.CfgFn) (sink string, channel string) {
+ sink = feature.MakeRandomK8sName("sink")
var channels []string
for i := 0; i < length; i++ {
@@ -79,13 +87,19 @@ func ChannelChain(length int, createSubscriberFn func(ref *duckv1.KReference, ur
f.Setup("subscription is ready", subscription.IsReady(sub))
}
- // attach the first channel to the source
- f.Requirement("install containersource", containersource.Install(cs, containersource.WithSink(channel_impl.AsDestinationRef(channels[0]))))
- f.Requirement("containersource goes ready", containersource.IsReady(cs))
+ return sink, channels[0]
+}
- f.Assert("chained channels relay events", assert.OnStore(sink).MatchEvent(test.HasType("dev.knative.eventing.samples.heartbeat")).AtLeast(1))
+func ChannelChainAssert(f *feature.Feature, sink, channel string) {
+ cs := feature.MakeRandomK8sName("containersource")
+ eventType := feature.MakeRandomK8sName("et")
+ args := "--eventType=" + eventType
+ f.Requirement("install containersource", containersource.Install(cs,
+ containersource.WithSink(channel_impl.AsDestinationRef(channel)),
+ containersource.WithArgs(args)))
+ f.Requirement("containersource goes ready", containersource.IsReady(cs))
- return f
+ f.Assert("chained channels relay events", assert.OnStore(sink).MatchEvent(test.HasType(eventType)).AtLeast(1))
}
func DeadLetterSink(createSubscriberFn func(ref *duckv1.KReference, uri string) manifest.CfgFn) *feature.Feature {
diff --git a/vendor/knative.dev/eventing/test/upgrade/README.md b/vendor/knative.dev/eventing/test/upgrade/README.md
new file mode 100644
index 0000000000..b5398886a8
--- /dev/null
+++ b/vendor/knative.dev/eventing/test/upgrade/README.md
@@ -0,0 +1,142 @@
+# Upgrade Tests
+
+In order to get coverage for the upgrade process from an operator’s perspective,
+we need an additional suite of tests that perform a complete knative upgrade.
+Running these tests on every commit will ensure that we don’t introduce any
+non-upgradeable changes, so every commit should be releasable.
+
+This is inspired by Kubernetes
+[upgrade testing](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md#version-skewed-and-upgrade-testing).
+
+These tests are a pretty big hammer in that they cover more than just version
+changes, but it’s one of the only ways to make sure we don’t accidentally make
+breaking changes for now.
+
+## Flow
+
+We’d like to validate that the upgrade doesn’t break any resources (they still
+propagate events) and doesn't break our installation (we can still update
+resources).
+
+At a high level, we want to do this:
+
+1. Install the latest knative release.
+1. Create some resources.
+1. Install knative at HEAD.
+1. Run any post-install jobs that apply to the release being upgraded to.
+1. Test those resources, verify that we didn’t break anything.
+
+To achieve that, we created an upgrade framework (knative.dev/pkg/test/upgrade).
+This framework enforces running the upgrade tests in a specific order and supports
+continual verification of the system under test. In the case of Eventing, the flow is:
+
+1. Install the latest release from GitHub.
+1. Run the `preupgrade` smoke tests.
+1. Start `continual` tests that will propagate events in the background, while
+ upgrading and downgrading.
+1. Install at HEAD (`ko apply -f config/`) and run the post-install jobs.
+1. Run the `postupgrade` smoke tests.
+1. Install the latest release from GitHub.
+1. Run the `postdowngrade` smoke tests.
+1. Stop and verify `continual` tests, checking if every event propagated well.
+
+## Tests
+
+### Smoke test
+
+This was stolen from the e2e tests as one of the simplest cases.
+
+#### preupgrade, postupgrade, postdowngrade
+
+Run the selected smoke test.
+
+### Probe test
+
+In order to verify that we don't have data-plane unavailability during our
+control-plane outages (when we're upgrading the knative/eventing installation),
+we run a prober test that continually sends events to a service during the
+entire upgrade/downgrade process. When the upgrade completes, we make sure that
+all of those events propagated at least once.
+
+To achieve that, the
+[wathola tool](https://pkg.go.dev/knative.dev/eventing/test/upgrade/prober/wathola)
+was prepared. It consists of four components: _sender_, _forwarder_, _receiver_,
+and _fetcher_. The _sender_ is an ordinary Kubernetes deployment that publishes events
+to the System Under Test (SUT). By default, the SUT is a default `broker`
+with two triggers, one for each type of event being sent. The _sender_ sends events
+at a given interval. When it terminates (by either `SIGTERM` or
+`SIGINT`), a `finished` event is generated. The _forwarder_ is a Knative Serving
+service that scales up from zero to receive the sent events and forward them to
+a given target, which is the _receiver_ in our case. The _receiver_ is an ordinary
+deployment that collects events from multiple forwarders and exposes an
+endpoint `/report` that can be polled to get the status of received events. To
+fetch the report from within the cluster, the _fetcher_ comes in. It is a simple
+one-time job that fetches the report from the _receiver_ and prints it to stdout as
+JSON. That enables the test client to download the _fetcher_ logs and parse the JSON
+to get the final report.
+
+The diagram below describes the setup:
+
+```
+ K8s cluster | Test machine
+ |
+(deployment) (ksvc) (deployment) |
++--------+ +-----------+ +----------+ | +------------+
+| | | ++ | | | | |
+| Sender | +-->| Forwarder ||----->+ Receiver | | + TestProber |
+| | | | || | |<---+ | | |
++---+----+ | +------------| +----------+ | | +------------+
+ | | +-----------+ | |
+ | ```````|````````````````````````````` | |
+ | ` | ` +---------+ |
+ | ` +--+-----+ +---------+ ` | | |
+ +-----> | | +-+ ` | Fetcher | |
+ ` | Broker | < - > | Trigger | | ` | | |
+ ` | | | | | ` +---------+ |
+ ` +--------+ +---------+ | ` (job) |
+ ` (default) +----------+ ` |
+ ` (SUT) `
+ `````````````````````````````````````
+```
+
+#### Probe test configuration
+
+Probe test behavior can be influenced from the outside without modifying its source
+code. That can be useful if one would like to run the upgrade tests in a different
+context. One such example is running the Eventing upgrade tests in a cluster that
+has both Serving and Eventing installed. In such an environment, one can set the
+environment variable `EVENTING_UPGRADE_TESTS_SERVING_USE` to enable usage of the
+ksvc forwarder (which is disabled by default):
+
+```
+$ export EVENTING_UPGRADE_TESTS_SERVING_USE=true
+```
+
+Any option, apart from the namespace, in the
+[`knative.dev/eventing/test/upgrade/prober.Config`](https://github.com/knative/eventing/blob/022e281/test/upgrade/prober/prober.go#L52-L63)
+struct can be overridden by using the `EVENTING_UPGRADE_TESTS_XXXXX` environment
+variable prefix (following the
+[kelseyhightower/envconfig](https://github.com/kelseyhightower/envconfig#usage)
+usage).
+
+#### Inspecting Zipkin traces for undelivered events
+
+When tracing is enabled in the `config-tracing` config map in the system namespace,
+the prober collects traces for undelivered events. The traces are exported as JSON files
+under the artifacts directory, with the traces for each event stored in a separate file.
+Step event traces are stored as `$ARTIFACTS/traces/missed-events/step-.json` and
+the finished event traces are stored as `$ARTIFACTS/traces/missed-events/finished.json`.
+
+Traces can be viewed as follows:
+- Start a Zipkin container on localhost:
+ ```
+ $ docker run -d -p 9411:9411 ghcr.io/openzipkin/zipkin:2
+ ```
+- Send traces to the Zipkin endpoint:
+ ```
+ $ curl -v -X POST localhost:9411/api/v2/spans \
+ -H 'Content-Type: application/json' \
+ -d @$ARTIFACTS/traces/missed-events/step-.json
+ ```
+- View traces in Zipkin UI at `http://localhost:9411/zipkin`
diff --git a/vendor/knative.dev/eventing/test/upgrade/continual.go b/vendor/knative.dev/eventing/test/upgrade/continual.go
new file mode 100644
index 0000000000..3dd96e922b
--- /dev/null
+++ b/vendor/knative.dev/eventing/test/upgrade/continual.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package upgrade
+
+import (
+ "knative.dev/eventing/test/upgrade/prober"
+ pkgupgrade "knative.dev/pkg/test/upgrade"
+)
+
+// ContinualTest will perform a continual validation of Eventing SUT.
+func ContinualTest() pkgupgrade.BackgroundOperation {
+ return prober.NewContinualVerification("EventingContinualTest",
+ prober.ContinualVerificationOptions{})
+}
diff --git a/vendor/knative.dev/eventing/test/upgrade/postupgrade.go b/vendor/knative.dev/eventing/test/upgrade/postupgrade.go
new file mode 100644
index 0000000000..f8de52789a
--- /dev/null
+++ b/vendor/knative.dev/eventing/test/upgrade/postupgrade.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package upgrade
+
+import (
+ testlib "knative.dev/eventing/test/lib"
+ "knative.dev/pkg/test/migrate"
+ pkgupgrade "knative.dev/pkg/test/upgrade"
+)
+
+func CRDPostUpgradeTest() pkgupgrade.Operation {
+ return pkgupgrade.NewOperation("PostUpgradeCRDTest", func(c pkgupgrade.Context) {
+ client := testlib.Setup(c.T, true)
+ defer testlib.TearDown(client)
+ migrate.ExpectSingleStoredVersion(c.T, client.Apiextensions.CustomResourceDefinitions(), "knative.dev")
+ })
+}
diff --git a/vendor/knative.dev/eventing/test/upgrade/prober/forwarder.go b/vendor/knative.dev/eventing/test/upgrade/prober/forwarder.go
index a7ee1d1ab6..1e96d7168f 100644
--- a/vendor/knative.dev/eventing/test/upgrade/prober/forwarder.go
+++ b/vendor/knative.dev/eventing/test/upgrade/prober/forwarder.go
@@ -20,6 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
testlib "knative.dev/eventing/test/lib"
"knative.dev/eventing/test/lib/duck"
"knative.dev/eventing/test/lib/resources"
@@ -69,6 +70,9 @@ func (p *prober) forwarderKService(name, namespace string) *unstructured.Unstruc
"spec": map[string]interface{}{
"template": map[string]interface{}{
"metadata": map[string]interface{}{
+ "labels": map[string]interface{}{
+ "sidecar.istio.io/inject": "true",
+ },
"annotations": map[string]interface{}{
"sidecar.istio.io/inject": "true",
"sidecar.istio.io/rewriteAppHTTPProbers": "true",
diff --git a/vendor/knative.dev/eventing/test/upgrade/prober/receiver.go b/vendor/knative.dev/eventing/test/upgrade/prober/receiver.go
index ddd75fd664..47c574e2d8 100644
--- a/vendor/knative.dev/eventing/test/upgrade/prober/receiver.go
+++ b/vendor/knative.dev/eventing/test/upgrade/prober/receiver.go
@@ -91,7 +91,8 @@ func (p *prober) createReceiverDeployment() *appsv1.Deployment {
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- "app": receiver.Name,
+ "app": receiver.Name,
+ "sidecar.istio.io/inject": "true",
},
Annotations: map[string]string{
"sidecar.istio.io/inject": "true",
diff --git a/vendor/knative.dev/eventing/test/upgrade/prober/sender.go b/vendor/knative.dev/eventing/test/upgrade/prober/sender.go
index df9173a1f0..4e1a26bc2c 100644
--- a/vendor/knative.dev/eventing/test/upgrade/prober/sender.go
+++ b/vendor/knative.dev/eventing/test/upgrade/prober/sender.go
@@ -52,7 +52,8 @@ func (p *prober) deploySender() {
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- "app": sender.Name,
+ "app": sender.Name,
+ "sidecar.istio.io/inject": "true",
},
Annotations: map[string]string{
"sidecar.istio.io/inject": "true",
diff --git a/vendor/knative.dev/eventing/test/upgrade/prober/verify.go b/vendor/knative.dev/eventing/test/upgrade/prober/verify.go
index 9810daf8cc..069e3a1821 100644
--- a/vendor/knative.dev/eventing/test/upgrade/prober/verify.go
+++ b/vendor/knative.dev/eventing/test/upgrade/prober/verify.go
@@ -255,7 +255,8 @@ func (p *prober) deployFetcher() *batchv1.Job {
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
- "app": fetcherName,
+ "app": fetcherName,
+ "sidecar.istio.io/inject": "true",
},
Annotations: map[string]string{
"sidecar.istio.io/inject": "true",
diff --git a/vendor/knative.dev/eventing/test/upgrade/upgrade.go b/vendor/knative.dev/eventing/test/upgrade/upgrade.go
new file mode 100644
index 0000000000..cc9b6bf29f
--- /dev/null
+++ b/vendor/knative.dev/eventing/test/upgrade/upgrade.go
@@ -0,0 +1,331 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package upgrade
+
+import (
+ "context"
+ "log"
+ "os"
+ "sync"
+ "testing"
+
+ "knative.dev/eventing/pkg/apis/eventing"
+ brokerfeatures "knative.dev/eventing/test/rekt/features/broker"
+ "knative.dev/eventing/test/rekt/features/channel"
+ brokerresources "knative.dev/eventing/test/rekt/resources/broker"
+ "knative.dev/eventing/test/rekt/resources/channel_impl"
+ "knative.dev/eventing/test/rekt/resources/subscription"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/system"
+ pkgupgrade "knative.dev/pkg/test/upgrade"
+ "knative.dev/pkg/test/zipkin"
+ "knative.dev/reconciler-test/pkg/environment"
+ "knative.dev/reconciler-test/pkg/feature"
+ "knative.dev/reconciler-test/pkg/k8s"
+ "knative.dev/reconciler-test/pkg/knative"
+ "knative.dev/reconciler-test/pkg/manifest"
+)
+
+var (
+ channelConfigMux = &sync.Mutex{}
+ brokerConfigMux = &sync.Mutex{}
+ opts = []environment.EnvOpts{
+ knative.WithKnativeNamespace(system.Namespace()),
+ knative.WithLoggingConfig,
+ knative.WithTracingConfig,
+ k8s.WithEventListener,
+ }
+)
+
+// RunMainTest expects flags to be already initialized.
+// This function needs to be exposed, so that test cases in other repositories can call the upgrade
+// main tests in eventing.
+func RunMainTest(m *testing.M) {
+ os.Exit(func() int {
+ // Any test may call SetupZipkinTracing; it will only actually be done once. This should be the ONLY
+ // place that cleans it up. If an individual test calls this instead, then it will break other
+ // tests that need the tracing in place.
+ defer zipkin.CleanupZipkinTracingSetup(log.Printf)
+ return m.Run()
+ }())
+}
+
+// DurableFeature holds the setup and verify phase of a feature. The "setup" phase should set up
+// the feature. The "verify" phase should only verify its function. This function should be
+// idempotent. Calling this function multiple times should still properly verify the feature
+// (e.g. one call after upgrade, one call after downgrade).
+type DurableFeature struct {
+ SetupF *feature.Feature
+ // EnvOpts should never include environment.Managed or environment.Cleanup as these functions
+ // break the functionality.
+ EnvOpts []environment.EnvOpts
+ setupEnv environment.Environment
+ setupCtx context.Context
+ VerifyF func() *feature.Feature
+ Global environment.GlobalEnvironment
+}
+
+func (fe *DurableFeature) Setup(label string) pkgupgrade.Operation {
+ return pkgupgrade.NewOperation(label, func(c pkgupgrade.Context) {
+ c.T.Parallel()
+ ctx, env := fe.Global.Environment(
+ fe.EnvOpts...,
+ // Not managed - namespace preserved.
+ )
+ fe.setupEnv = env
+ fe.setupCtx = ctx
+ env.Test(ctx, c.T, fe.SetupF)
+ })
+}
+
+func (fe *DurableFeature) Verify(label string) pkgupgrade.Operation {
+ return pkgupgrade.NewOperation(label, func(c pkgupgrade.Context) {
+ c.T.Parallel()
+ fe.setupEnv.Test(fe.setupCtx, c.T, fe.VerifyF())
+ })
+}
+
+func (fe *DurableFeature) VerifyAndTeardown(label string) pkgupgrade.Operation {
+ return pkgupgrade.NewOperation(label, func(c pkgupgrade.Context) {
+ c.T.Parallel()
+ fe.setupEnv.Test(fe.setupCtx, c.T, fe.VerifyF())
+ // Ensures teardown of resources/namespace.
+ fe.setupEnv.Finish()
+ })
+}
+
+func (fe *DurableFeature) SetupVerifyAndTeardown(label string) pkgupgrade.Operation {
+ return pkgupgrade.NewOperation(label, func(c pkgupgrade.Context) {
+ c.T.Parallel()
+ ctx, env := fe.Global.Environment(
+ append(fe.EnvOpts, environment.Managed(c.T))...,
+ )
+ env.Test(ctx, c.T, fe.SetupF)
+ env.Test(ctx, c.T, fe.VerifyF())
+ })
+}
+
+type FeatureWithUpgradeTests interface {
+ PreUpgradeTests() []pkgupgrade.Operation
+ PostUpgradeTests() []pkgupgrade.Operation
+ PostDowngradeTests() []pkgupgrade.Operation
+}
+
+// NewFeatureOnlyUpgrade decorates a feature with these actions:
+// Pre-upgrade: Setup, Verify
+// Post-upgrade: Verify, Teardown
+// Post-downgrade: no-op.
+func NewFeatureOnlyUpgrade(f *DurableFeature) FeatureWithUpgradeTests {
+ return featureOnlyUpgrade{
+ label: "OnlyUpgrade",
+ feature: f,
+ }
+}
+
+type featureOnlyUpgrade struct {
+ label string
+ feature *DurableFeature
+}
+
+func (f featureOnlyUpgrade) PreUpgradeTests() []pkgupgrade.Operation {
+ return []pkgupgrade.Operation{
+ f.feature.Setup(f.label),
+ }
+}
+
+func (f featureOnlyUpgrade) PostUpgradeTests() []pkgupgrade.Operation {
+ return []pkgupgrade.Operation{
+ f.feature.VerifyAndTeardown(f.label),
+ }
+}
+
+func (f featureOnlyUpgrade) PostDowngradeTests() []pkgupgrade.Operation {
+ // No-op. Teardown was done post-upgrade.
+ return nil
+}
+
+// NewFeatureUpgradeDowngrade decorates a feature with these actions:
+// Pre-upgrade: Setup, Verify.
+// Post-upgrade: Verify.
+// Post-downgrade: Verify, Teardown.
+func NewFeatureUpgradeDowngrade(f *DurableFeature) FeatureWithUpgradeTests {
+ return featureUpgradeDowngrade{
+ label: "BothUpgradeDowngrade",
+ feature: f,
+ }
+}
+
+type featureUpgradeDowngrade struct {
+ label string
+ feature *DurableFeature
+}
+
+func (f featureUpgradeDowngrade) PreUpgradeTests() []pkgupgrade.Operation {
+ return []pkgupgrade.Operation{
+ f.feature.Setup(f.label),
+ }
+}
+
+func (f featureUpgradeDowngrade) PostUpgradeTests() []pkgupgrade.Operation {
+ // PostUpgrade only asserts existing resources. Teardown will be done post-downgrade.
+ return []pkgupgrade.Operation{
+ f.feature.Verify(f.label),
+ }
+}
+
+func (f featureUpgradeDowngrade) PostDowngradeTests() []pkgupgrade.Operation {
+ return []pkgupgrade.Operation{
+ f.feature.VerifyAndTeardown(f.label),
+ }
+}
+
+// NewFeatureOnlyDowngrade decorates a feature with these actions:
+// Pre-upgrade: no-op.
+// Post-upgrade: Setup, Verify.
+// Post-downgrade: Verify, Teardown.
+func NewFeatureOnlyDowngrade(f *DurableFeature) FeatureWithUpgradeTests {
+ return featureOnlyDowngrade{
+ label: "OnlyDowngrade",
+ feature: f,
+ }
+}
+
+type featureOnlyDowngrade struct {
+ label string
+ feature *DurableFeature
+}
+
+func (f featureOnlyDowngrade) PreUpgradeTests() []pkgupgrade.Operation {
+ // No-op. Resources will be created post-upgrade.
+ return nil
+}
+
+func (f featureOnlyDowngrade) PostUpgradeTests() []pkgupgrade.Operation {
+ // Resources created post-upgrade.
+ return []pkgupgrade.Operation{
+ f.feature.Setup(f.label),
+ }
+}
+
+func (f featureOnlyDowngrade) PostDowngradeTests() []pkgupgrade.Operation {
+ // Assert and Teardown is done post-downgrade.
+ return []pkgupgrade.Operation{
+ f.feature.VerifyAndTeardown(f.label),
+ }
+}
+
+// NewFeatureSmoke decorates a feature with these actions:
+// Pre-upgrade: no-op.
+// Post-upgrade: Setup, Verify, Teardown.
+// Post-downgrade: Setup, Verify, Teardown.
+func NewFeatureSmoke(f *DurableFeature) FeatureWithUpgradeTests {
+ return featureSmoke{
+ label: "Smoke",
+ feature: f,
+ }
+}
+
+type featureSmoke struct {
+ label string
+ feature *DurableFeature
+}
+
+func (f featureSmoke) PreUpgradeTests() []pkgupgrade.Operation {
+ // No-op. No need to smoke test before upgrade.
+ return nil
+}
+
+func (f featureSmoke) PostUpgradeTests() []pkgupgrade.Operation {
+ return []pkgupgrade.Operation{
+ f.feature.SetupVerifyAndTeardown(f.label),
+ }
+}
+
+func (f featureSmoke) PostDowngradeTests() []pkgupgrade.Operation {
+ return []pkgupgrade.Operation{
+ f.feature.SetupVerifyAndTeardown(f.label),
+ }
+}
+
+// FeatureGroupWithUpgradeTests aggregates tests across a group of features.
+type FeatureGroupWithUpgradeTests []FeatureWithUpgradeTests
+
+func (fg FeatureGroupWithUpgradeTests) PreUpgradeTests() []pkgupgrade.Operation {
+ ops := make([]pkgupgrade.Operation, 0, len(fg))
+ for _, ft := range fg {
+ ops = append(ops, ft.PreUpgradeTests()...)
+ }
+ return ops
+}
+
+func (fg FeatureGroupWithUpgradeTests) PostUpgradeTests() []pkgupgrade.Operation {
+ ops := make([]pkgupgrade.Operation, 0, len(fg))
+ for _, ft := range fg {
+ ops = append(ops, ft.PostUpgradeTests()...)
+ }
+ return ops
+}
+
+func (fg FeatureGroupWithUpgradeTests) PostDowngradeTests() []pkgupgrade.Operation {
+ ops := make([]pkgupgrade.Operation, 0, len(fg))
+ for _, ft := range fg {
+ ops = append(ops, ft.PostDowngradeTests()...)
+ }
+ return ops
+}
+
+func InMemoryChannelFeature(glob environment.GlobalEnvironment) *DurableFeature {
+ // Prevent race conditions on channel_impl.EnvCfg.ChannelGK when running tests in parallel.
+ channelConfigMux.Lock()
+ defer channelConfigMux.Unlock()
+ channel_impl.EnvCfg.ChannelGK = "InMemoryChannel.messaging.knative.dev"
+ channel_impl.EnvCfg.ChannelV = "v1"
+
+ createSubscriberFn := func(ref *duckv1.KReference, uri string) manifest.CfgFn {
+ return subscription.WithSubscriber(ref, uri, "")
+ }
+
+ setupF := feature.NewFeature()
+ sink, ch := channel.ChannelChainSetup(setupF, 1, createSubscriberFn)
+
+ verifyF := func() *feature.Feature {
+ f := feature.NewFeatureNamed(setupF.Name)
+ channel.ChannelChainAssert(f, sink, ch)
+ return f
+ }
+
+ return &DurableFeature{SetupF: setupF, VerifyF: verifyF, Global: glob, EnvOpts: opts}
+}
+
+func BrokerEventTransformationForTrigger(glob environment.GlobalEnvironment,
+) *DurableFeature {
+ // Prevent race conditions on EnvCfg.BrokerClass when running tests in parallel.
+ brokerConfigMux.Lock()
+ defer brokerConfigMux.Unlock()
+ brokerresources.EnvCfg.BrokerClass = eventing.MTChannelBrokerClassValue
+
+ setupF := feature.NewFeature()
+ cfg := brokerfeatures.BrokerEventTransformationForTriggerSetup(setupF)
+
+ verifyF := func() *feature.Feature {
+ f := feature.NewFeatureNamed(setupF.Name)
+ brokerfeatures.BrokerEventTransformationForTriggerAssert(f, cfg)
+ return f
+ }
+
+ return &DurableFeature{SetupF: setupF, VerifyF: verifyF, Global: glob, EnvOpts: opts}
+}
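
These helpers are meant to be composed into an upgrade suite by downstream repositories. The sketch below assumes the `knative.dev/pkg/test/upgrade` `Suite`/`Installations` API and leaves the actual install and uninstall operations to the caller, since those are repo-specific; the `runUpgradeSuite` function name is hypothetical:

```go
// Hypothetical wiring of the helpers above into an upgrade suite.
package upgradetest

import (
	"testing"

	"knative.dev/eventing/test/upgrade"
	pkgupgrade "knative.dev/pkg/test/upgrade"
	"knative.dev/reconciler-test/pkg/environment"
)

func runUpgradeSuite(t *testing.T, global environment.GlobalEnvironment, installs pkgupgrade.Installations) {
	g := upgrade.FeatureGroupWithUpgradeTests{
		// Features that must keep working across both the upgrade and the downgrade.
		upgrade.NewFeatureUpgradeDowngrade(upgrade.InMemoryChannelFeature(global)),
		// Features created before the upgrade and torn down right after it.
		upgrade.NewFeatureOnlyUpgrade(upgrade.BrokerEventTransformationForTrigger(global)),
		// Pure smoke tests, re-created from scratch after upgrade and downgrade.
		upgrade.NewFeatureSmoke(upgrade.BrokerEventTransformationForTrigger(global)),
	}
	suite := pkgupgrade.Suite{
		Tests: pkgupgrade.Tests{
			PreUpgrade:    g.PreUpgradeTests(),
			PostUpgrade:   append([]pkgupgrade.Operation{upgrade.CRDPostUpgradeTest()}, g.PostUpgradeTests()...),
			PostDowngrade: g.PostDowngradeTests(),
			Continual:     []pkgupgrade.BackgroundOperation{upgrade.ContinualTest()},
		},
		Installations: installs,
	}
	suite.Execute(pkgupgrade.Configuration{T: t})
}
```
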
diff --git a/vendor/knative.dev/hack/codegen-library.sh b/vendor/knative.dev/hack/codegen-library.sh
index 01a43132dc..42ece681ce 100644
--- a/vendor/knative.dev/hack/codegen-library.sh
+++ b/vendor/knative.dev/hack/codegen-library.sh
@@ -133,16 +133,18 @@ function restore-changes-if-its-copyright-year-only() {
local difflist
log "Cleaning up generated code"
difflist="$(mktemp)"
- git diff --exit-code --name-only > "$difflist"
- # list git changes and skip those which differ only in the boilerplate year
- while read -r file; do
- # check if the file contains just the change in the boilerplate year
- if [ "$(LANG=C git diff --exit-code --shortstat -- "$file")" = ' 1 file changed, 1 insertion(+), 1 deletion(-)' ] && \
- [[ "$(git diff --exit-code -U1 -- "$file" | grep -Ec '^[+-]\s*[*#]?\s*Copyright 2[0-9]{3}')" -eq 2 ]]; then
- # restore changes to that file
- git checkout -- "$file"
- fi
- done < "$difflist"
+ if ! git diff --exit-code --name-only > /dev/null; then
+ # list git changes and skip those which differ only in the boilerplate year
+ git diff --name-only > "$difflist"
+ while read -r file; do
+ # check if the file contains just the change in the boilerplate year
+ if [ "$(LANG=C git diff --exit-code --shortstat -- "$file")" = ' 1 file changed, 1 insertion(+), 1 deletion(-)' ] && \
+ [[ "$(git diff --exit-code -U1 -- "$file" | grep -Ec '^[+-]\s*[*#]?\s*Copyright 2[0-9]{3}')" -eq 2 ]]; then
+ # restore changes to that file
+ git checkout -- "$file"
+ fi
+ done < "$difflist"
+ fi
rm -f "$difflist"
}
diff --git a/vendor/knative.dev/hack/library.sh b/vendor/knative.dev/hack/library.sh
index 755ae33d7e..46ac2f46fa 100644
--- a/vendor/knative.dev/hack/library.sh
+++ b/vendor/knative.dev/hack/library.sh
@@ -775,7 +775,7 @@ function go_update_deps() {
function __clean_goworksum_if_exists() {
if [ -f "$REPO_ROOT_DIR/go.work.sum" ]; then
log.step 'Cleaning the go.work.sum file'
- truncate --size 0 "$REPO_ROOT_DIR/go.work.sum"
+ truncate -s 0 "$REPO_ROOT_DIR/go.work.sum"
fi
}
diff --git a/vendor/knative.dev/pkg/test/crd.go b/vendor/knative.dev/pkg/test/crd.go
index eb7c0e0b3d..941ec7c21e 100644
--- a/vendor/knative.dev/pkg/test/crd.go
+++ b/vendor/knative.dev/pkg/test/crd.go
@@ -74,9 +74,9 @@ func CoreV1ObjectReference(kind, apiversion, name string) *corev1.ObjectReferenc
func NginxPod(namespace string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
- Name: nginxName,
- Namespace: namespace,
- Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ Name: nginxName,
+ Namespace: namespace,
+ Labels: map[string]string{"sidecar.istio.io/inject": "true"},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
diff --git a/vendor/knative.dev/pkg/test/migrate/checks.go b/vendor/knative.dev/pkg/test/migrate/checks.go
new file mode 100644
index 0000000000..3778743fe0
--- /dev/null
+++ b/vendor/knative.dev/pkg/test/migrate/checks.go
@@ -0,0 +1,53 @@
+package migrate
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ExpectSingleStoredVersion verifies that status.storedVersions on specific CRDs has only one version
+// and the version is listed in spec.Versions with storage: true. It means the CRDs
+// have been migrated and previous/unused API versions can be safely removed from the spec.
+func ExpectSingleStoredVersion(t *testing.T, crdClient apiextensionsv1.CustomResourceDefinitionInterface, crdGroup string) {
+ t.Helper()
+
+ crdList, err := crdClient.List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ t.Fatal("Unable to fetch crd list:", err)
+ }
+
+ for _, crd := range crdList.Items {
+ if strings.Contains(crd.Name, crdGroup) {
+ if len(crd.Status.StoredVersions) != 1 {
+ t.Errorf("%q does not have a single stored version: %+v", crd.Name, crd)
+ }
+ stored := crd.Status.StoredVersions[0]
+ for _, v := range crd.Spec.Versions {
+ if stored == v.Name && !v.Storage {
+ t.Errorf("%q is invalid: spec.versions.storage must be true for %q or "+
+ "version %q must be removed from status.storageVersions: %+v", crd.Name, v.Name, v.Name, crd)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/controller.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/controller.go
index f318d02316..ed55fd4708 100644
--- a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/controller.go
+++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/controller.go
@@ -100,9 +100,10 @@ func newController(ctx context.Context, name string, optsFunc ...OptionFunc) *co
handlers: opts.types,
callbacks: opts.callbacks,
- withContext:           opts.wc,
- disallowUnknownFields: opts.disallowUnknownFields,
- secretName:            wopts.SecretName,
+ withContext:               opts.wc,
+ disallowUnknownFields:     opts.disallowUnknownFields,
+ secretName:                wopts.SecretName,
+ disableNamespaceOwnership: wopts.DisableNamespaceOwnership,
+ disableNamespaceOwnership: wopts.DisableNamespaceOwnership,
client: client,
mwhlister: mwhInformer.Lister(),
diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go
index 4140ec7192..6aa08b4b94 100644
--- a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go
+++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go
@@ -69,8 +69,9 @@ type reconciler struct {
mwhlister admissionlisters.MutatingWebhookConfigurationLister
secretlister corelisters.SecretLister
- disallowUnknownFields bool
- secretName            string
+ disallowUnknownFields     bool
+ secretName                string
+ disableNamespaceOwnership bool
}
// CallbackFunc is the function to be invoked.
@@ -218,12 +219,14 @@ func (ac *reconciler) reconcileMutatingWebhook(ctx context.Context, caCert []byt
current := configuredWebhook.DeepCopy()
- ns, err := ac.client.CoreV1().Namespaces().Get(ctx, system.Namespace(), metav1.GetOptions{})
- if err != nil {
- return fmt.Errorf("failed to fetch namespace: %w", err)
+ if !ac.disableNamespaceOwnership {
+ ns, err := ac.client.CoreV1().Namespaces().Get(ctx, system.Namespace(), metav1.GetOptions{})
+ if err != nil {
+ return fmt.Errorf("failed to fetch namespace: %w", err)
+ }
+ nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace"))
+ current.OwnerReferences = []metav1.OwnerReference{nsRef}
}
- nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace"))
- current.OwnerReferences = []metav1.OwnerReference{nsRef}
for i, wh := range current.Webhooks {
if wh.Name != current.Name {
diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go
index f24b36792a..c8afa5c138 100644
--- a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go
+++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go
@@ -86,9 +86,10 @@ func newController(ctx context.Context, name string, optsFunc ...OptionFunc) *co
handlers: opts.types,
callbacks: opts.callbacks,
- withContext:           opts.wc,
- disallowUnknownFields: opts.DisallowUnknownFields(),
- secretName:            woptions.SecretName,
+ withContext:               opts.wc,
+ disallowUnknownFields:     opts.DisallowUnknownFields(),
+ secretName:                woptions.SecretName,
+ disableNamespaceOwnership: woptions.DisableNamespaceOwnership,
client: client,
vwhlister: vwhInformer.Lister(),
diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go
index afbc45c051..9f3114d4c6 100644
--- a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go
+++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go
@@ -60,8 +60,9 @@ type reconciler struct {
vwhlister admissionlisters.ValidatingWebhookConfigurationLister
secretlister corelisters.SecretLister
- disallowUnknownFields bool
- secretName            string
+ disallowUnknownFields     bool
+ secretName                string
+ disableNamespaceOwnership bool
}
var (
@@ -193,13 +194,15 @@ func (ac *reconciler) reconcileValidatingWebhook(ctx context.Context, caCert []b
current := configuredWebhook.DeepCopy()
- // Set the owner to namespace.
- ns, err := ac.client.CoreV1().Namespaces().Get(ctx, system.Namespace(), metav1.GetOptions{})
- if err != nil {
- return fmt.Errorf("failed to fetch namespace: %w", err)
+ if !ac.disableNamespaceOwnership {
+ // Set the owner to namespace.
+ ns, err := ac.client.CoreV1().Namespaces().Get(ctx, system.Namespace(), metav1.GetOptions{})
+ if err != nil {
+ return fmt.Errorf("failed to fetch namespace: %w", err)
+ }
+ nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace"))
+ current.OwnerReferences = []metav1.OwnerReference{nsRef}
}
- nsRef := *metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace"))
- current.OwnerReferences = []metav1.OwnerReference{nsRef}
for i, wh := range current.Webhooks {
if wh.Name != current.Name {
diff --git a/vendor/knative.dev/pkg/webhook/webhook.go b/vendor/knative.dev/pkg/webhook/webhook.go
index e05c6f041e..1b90e75fca 100644
--- a/vendor/knative.dev/pkg/webhook/webhook.go
+++ b/vendor/knative.dev/pkg/webhook/webhook.go
@@ -81,6 +81,10 @@ type Options struct {
// before shutting down.
GracePeriod time.Duration
+ // DisableNamespaceOwnership configures whether the webhook adds an owner reference for the SYSTEM_NAMESPACE
+ // Disabling this is useful when you expect the webhook configuration to be managed by something other than knative
+ DisableNamespaceOwnership bool
+
// ControllerOptions encapsulates options for creating a new controller,
// including throttling and stats behavior.
ControllerOptions *controller.ControllerOptions
diff --git a/vendor/knative.dev/reconciler-test/pkg/eventshub/103-pod.yaml b/vendor/knative.dev/reconciler-test/pkg/eventshub/103-pod.yaml
index 83b2c55fba..c780e86ab6 100644
--- a/vendor/knative.dev/reconciler-test/pkg/eventshub/103-pod.yaml
+++ b/vendor/knative.dev/reconciler-test/pkg/eventshub/103-pod.yaml
@@ -19,6 +19,9 @@ metadata:
namespace: {{ .namespace }}
labels:
app: eventshub-{{ .name }}
+ {{ range $key, $value := .labels }}
+ {{ $key }}: "{{ $value }}"
+ {{ end }}
{{ if .annotations }}
annotations:
{{ range $key, $value := .annotations }}
diff --git a/vendor/knative.dev/reconciler-test/pkg/eventshub/104-forwarder.yaml b/vendor/knative.dev/reconciler-test/pkg/eventshub/104-forwarder.yaml
index e014d5fb9b..a34e713249 100644
--- a/vendor/knative.dev/reconciler-test/pkg/eventshub/104-forwarder.yaml
+++ b/vendor/knative.dev/reconciler-test/pkg/eventshub/104-forwarder.yaml
@@ -25,12 +25,20 @@ metadata:
{{ end }}
spec:
template:
- {{ if .podannotations }}
+ {{ if or .podannotations .podlabels }}
metadata:
+ {{ if .podannotations }}
annotations:
{{ range $key, $value := .podannotations }}
{{ $key }}: "{{ $value }}"
- {{ end }}
+ {{ end }}
+ {{ end }}
+ {{ if .podlabels }}
+ labels:
+ {{ range $key, $value := .podlabels }}
+ {{ $key }}: "{{ $value }}"
+ {{ end }}
+ {{ end }}
{{ end }}
spec:
serviceAccountName: "{{ .name }}"
diff --git a/vendor/knative.dev/reconciler-test/pkg/eventshub/resources.go b/vendor/knative.dev/reconciler-test/pkg/eventshub/resources.go
index d87e2f5ffb..c34f55c8f1 100644
--- a/vendor/knative.dev/reconciler-test/pkg/eventshub/resources.go
+++ b/vendor/knative.dev/reconciler-test/pkg/eventshub/resources.go
@@ -168,6 +168,7 @@ func Install(name string, options ...EventsHubOption) feature.StepFn {
if ic := environment.GetIstioConfig(ctx); ic.Enabled {
manifest.WithIstioPodAnnotations(cfg)
+ manifest.WithIstioPodLabels(cfg)
}
manifest.PodSecurityCfgFn(ctx, t)(cfg)
diff --git a/vendor/knative.dev/reconciler-test/pkg/manifest/manifest.go b/vendor/knative.dev/reconciler-test/pkg/manifest/manifest.go
index 2fc8f700e9..8276d51c8b 100644
--- a/vendor/knative.dev/reconciler-test/pkg/manifest/manifest.go
+++ b/vendor/knative.dev/reconciler-test/pkg/manifest/manifest.go
@@ -86,10 +86,14 @@ func (f *YamlManifest) Apply(spec *unstructured.Unstructured) error {
if err != nil {
return err
}
+ gvr, _ := meta.UnsafeGuessKindToResource(spec.GroupVersionKind())
if current == nil {
f.log.Info("Creating type ", spec.GroupVersionKind(), " name ", spec.GetName())
- gvr, _ := meta.UnsafeGuessKindToResource(spec.GroupVersionKind())
if _, err := f.client.Resource(gvr).Namespace(spec.GetNamespace()).Create(context.Background(), spec, v1.CreateOptions{}); err != nil {
+ // We might be applying the same resource in parallel, in that case, update the resource.
+ if errors.IsAlreadyExists(err) {
+ return f.Apply(spec)
+ }
return fmt.Errorf("failed to create resource %v - Resource:\n%s", err, toYaml(spec))
}
} else {
@@ -97,7 +101,6 @@ func (f *YamlManifest) Apply(spec *unstructured.Unstructured) error {
if UpdateChanged(spec.UnstructuredContent(), current.UnstructuredContent()) {
f.log.Info("Updating type ", spec.GroupVersionKind(), " name ", spec.GetName())
- gvr, _ := meta.UnsafeGuessKindToResource(spec.GroupVersionKind())
if _, err = f.client.Resource(gvr).Namespace(current.GetNamespace()).Update(context.Background(), current, v1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update resource %v - Resource:\n%s", err, toYaml(spec))
}
diff --git a/vendor/knative.dev/reconciler-test/pkg/manifest/options.go b/vendor/knative.dev/reconciler-test/pkg/manifest/options.go
index bb51fb58a3..6e0aa92b9c 100644
--- a/vendor/knative.dev/reconciler-test/pkg/manifest/options.go
+++ b/vendor/knative.dev/reconciler-test/pkg/manifest/options.go
@@ -63,6 +63,21 @@ func WithPodAnnotations(additional map[string]interface{}) CfgFn {
}
}
+// WithPodLabels appends pod labels (usually used by types where pod template is embedded)
+func WithPodLabels(additional map[string]string) CfgFn {
+ return func(cfg map[string]interface{}) {
+ if ann, ok := cfg["podlabels"]; ok {
+ m := make(map[string]interface{}, len(additional))
+ for k, v := range additional {
+ m[k] = v
+ }
+ appendToOriginal(ann, m)
+ return
+ }
+ cfg["podlabels"] = additional
+ }
+}
+
func appendToOriginal(original interface{}, additional map[string]interface{}) {
annotations := original.(map[string]interface{})
for k, v := range additional {
@@ -92,3 +107,12 @@ func WithIstioPodAnnotations(cfg map[string]interface{}) {
WithAnnotations(podAnnotations)(cfg)
WithPodAnnotations(podAnnotations)(cfg)
}
+
+func WithIstioPodLabels(cfg map[string]interface{}) {
+ podLabels := map[string]string{
+ "sidecar.istio.io/inject": "true",
+ }
+
+ WithLabels(podLabels)(cfg)
+ WithPodLabels(podLabels)(cfg)
+}
diff --git a/vendor/knative.dev/reconciler-test/pkg/resources/job/job.go b/vendor/knative.dev/reconciler-test/pkg/resources/job/job.go
index b9237df3dd..aee655360d 100644
--- a/vendor/knative.dev/reconciler-test/pkg/resources/job/job.go
+++ b/vendor/knative.dev/reconciler-test/pkg/resources/job/job.go
@@ -47,6 +47,7 @@ func Install(name string, image string, options ...manifest.CfgFn) feature.StepF
if ic := environment.GetIstioConfig(ctx); ic.Enabled {
manifest.WithIstioPodAnnotations(cfg)
+ manifest.WithIstioPodLabels(cfg)
}
manifest.PodSecurityCfgFn(ctx, t)(cfg)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 177489b7c3..03629bf354 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -7,7 +7,7 @@ contrib.go.opencensus.io/exporter/prometheus
# contrib.go.opencensus.io/exporter/zipkin v0.1.2
## explicit
contrib.go.opencensus.io/exporter/zipkin
-# github.com/IBM/sarama v1.43.1
+# github.com/IBM/sarama v1.43.3
## explicit; go 1.19
github.com/IBM/sarama
# github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210420163308-c1402a70e2f1
@@ -83,7 +83,7 @@ github.com/coreos/go-oidc/v3/oidc
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
-# github.com/eapache/go-resiliency v1.6.0
+# github.com/eapache/go-resiliency v1.7.0
## explicit; go 1.13
github.com/eapache/go-resiliency/breaker
# github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3
@@ -129,7 +129,7 @@ github.com/go-openapi/jsonreference/internal
# github.com/go-openapi/swag v0.23.0
## explicit; go 1.20
github.com/go-openapi/swag
-# github.com/gobuffalo/flect v1.0.2
+# github.com/gobuffalo/flect v1.0.3
## explicit; go 1.16
github.com/gobuffalo/flect
# github.com/gogo/protobuf v1.3.2
@@ -256,7 +256,7 @@ github.com/kedacore/keda/v2/apis/keda/v1alpha1
# github.com/kelseyhightower/envconfig v1.4.0
## explicit
github.com/kelseyhightower/envconfig
-# github.com/klauspost/compress v1.17.8
+# github.com/klauspost/compress v1.17.9
## explicit; go 1.20
github.com/klauspost/compress
github.com/klauspost/compress/flate
@@ -420,8 +420,8 @@ go.opentelemetry.io/otel/trace/embedded
# go.uber.org/atomic v1.10.0
## explicit; go 1.18
go.uber.org/atomic
-# go.uber.org/automaxprocs v1.5.3
-## explicit; go 1.18
+# go.uber.org/automaxprocs v1.6.0
+## explicit; go 1.20
go.uber.org/automaxprocs/internal/cgroups
go.uber.org/automaxprocs/internal/runtime
go.uber.org/automaxprocs/maxprocs
@@ -441,16 +441,16 @@ go.uber.org/zap/internal/stacktrace
go.uber.org/zap/internal/ztest
go.uber.org/zap/zapcore
go.uber.org/zap/zaptest
-# golang.org/x/crypto v0.26.0
+# golang.org/x/crypto v0.27.0
## explicit; go 1.20
golang.org/x/crypto/md4
golang.org/x/crypto/pbkdf2
-# golang.org/x/mod v0.20.0
-## explicit; go 1.18
+# golang.org/x/mod v0.21.0
+## explicit; go 1.22.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.28.0
+# golang.org/x/net v0.29.0
## explicit; go 1.18
golang.org/x/net/http/httpguts
golang.org/x/net/http2
@@ -469,15 +469,15 @@ golang.org/x/oauth2/internal
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.24.0
+# golang.org/x/sys v0.25.0
## explicit; go 1.18
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/term v0.23.0
+# golang.org/x/term v0.24.0
## explicit; go 1.18
golang.org/x/term
-# golang.org/x/text v0.17.0
+# golang.org/x/text v0.18.0
## explicit; go 1.18
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
@@ -486,8 +486,8 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.6.0
## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.24.0
-## explicit; go 1.19
+# golang.org/x/tools v0.25.0
+## explicit; go 1.22.0
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/packages
@@ -514,13 +514,13 @@ gomodules.xyz/jsonpatch/v2
# google.golang.org/api v0.183.0
## explicit; go 1.20
google.golang.org/api/support/bundler
-# google.golang.org/genproto/googleapis/api v0.0.0-20240808171019-573a1156607a
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142
+## explicit; go 1.21
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240808171019-573a1156607a
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142
+## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.65.0
+# google.golang.org/grpc v1.67.0
## explicit; go 1.21
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -538,7 +538,9 @@ google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
@@ -561,11 +563,13 @@ google.golang.org/grpc/internal/resolver/dns/internal
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/resolver/unix
google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
google.golang.org/grpc/internal/status
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/internal/transport/networktype
google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
google.golang.org/grpc/metadata
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
@@ -1156,7 +1160,7 @@ k8s.io/utils/pointer
k8s.io/utils/ptr
k8s.io/utils/strings/slices
k8s.io/utils/trace
-# knative.dev/eventing v0.42.1-0.20240827090532-ecae8953ff0c
+# knative.dev/eventing v0.42.1-0.20240926123447-e7fca7646f4a
## explicit; go 1.22.0
knative.dev/eventing/cmd/event_display
knative.dev/eventing/cmd/heartbeats
@@ -1332,6 +1336,7 @@ knative.dev/eventing/test/test_images/wathola-fetcher
knative.dev/eventing/test/test_images/wathola-forwarder
knative.dev/eventing/test/test_images/wathola-receiver
knative.dev/eventing/test/test_images/wathola-sender
+knative.dev/eventing/test/upgrade
knative.dev/eventing/test/upgrade/prober
knative.dev/eventing/test/upgrade/prober/sut
knative.dev/eventing/test/upgrade/prober/wathola/client
@@ -1341,10 +1346,10 @@ knative.dev/eventing/test/upgrade/prober/wathola/fetcher
knative.dev/eventing/test/upgrade/prober/wathola/forwarder
knative.dev/eventing/test/upgrade/prober/wathola/receiver
knative.dev/eventing/test/upgrade/prober/wathola/sender
-# knative.dev/hack v0.0.0-20240814130635-06f7aff93954
+# knative.dev/hack v0.0.0-20240909014011-fc6a8452af6d
## explicit; go 1.21
knative.dev/hack
-# knative.dev/pkg v0.0.0-20240815051656-89743d9bbf7c
+# knative.dev/pkg v0.0.0-20240930065954-503173341499
## explicit; go 1.22.0
knative.dev/pkg/apiextensions/storageversion
knative.dev/pkg/apiextensions/storageversion/cmd/migrate
@@ -1440,6 +1445,7 @@ knative.dev/pkg/test/helpers
knative.dev/pkg/test/ingress
knative.dev/pkg/test/logging
knative.dev/pkg/test/logstream/v2
+knative.dev/pkg/test/migrate
knative.dev/pkg/test/monitoring
knative.dev/pkg/test/prow
knative.dev/pkg/test/security
@@ -1460,7 +1466,7 @@ knative.dev/pkg/webhook/json
knative.dev/pkg/webhook/resourcesemantics
knative.dev/pkg/webhook/resourcesemantics/defaulting
knative.dev/pkg/webhook/resourcesemantics/validation
-# knative.dev/reconciler-test v0.0.0-20240820100420-036ce14b8617
+# knative.dev/reconciler-test v0.0.0-20240926123451-87d857060042
## explicit; go 1.22.0
knative.dev/reconciler-test/cmd/eventshub
knative.dev/reconciler-test/pkg/environment