From 87e822ebb9f9e6a456f02fdd4992b5a6cab346e5 Mon Sep 17 00:00:00 2001 From: Qinyu Cai Date: Sat, 8 Jul 2023 08:15:42 +0800 Subject: [PATCH] feat: kbcli support to create kafka (#4175) Co-authored-by: L.Dongming --- Makefile | 4 +- apis/apps/v1alpha1/zz_generated.deepcopy.go | 36 +++ .../templates/cluster.yaml | 1 + deploy/kafka-cluster/Chart.yaml | 10 +- deploy/kafka-cluster/templates/cluster.yaml | 164 ++++---------- deploy/kafka-cluster/templates/rbac.yaml | 1 + deploy/kafka-cluster/templates/role.yaml | 14 -- .../kafka-cluster/templates/rolebinding.yaml | 14 -- .../templates/serviceaccount.yaml | 6 - .../templates/tests/test-connection.yaml | 15 -- deploy/kafka-cluster/templates/validate.yaml | 20 +- deploy/kafka-cluster/values.schema.json | 140 ++++++++++++ deploy/kafka-cluster/values.yaml | 210 +++--------------- deploy/kafka/Chart.yaml | 2 +- .../kafka/configs/kafka-server-constraint.cue | 4 +- deploy/kafka/configs/kafka-server.prop.tpl | 7 +- .../kafka/scripts/kafka-sasl-sample.prop.tpl | 4 +- .../kafka/scripts/kafka-server-setup.sh.tpl | 4 +- deploy/kafka/templates/NOTES.txt | 2 +- deploy/kafka/templates/clusterdefinition.yaml | 14 +- deploy/kafka/templates/clusterversion.yaml | 2 +- deploy/kafka/values.yaml | 3 +- deploy/kblib/templates/_resources.tpl | 10 +- deploy/kblib/templates/_storages.tpl | 13 ++ .../tdengine-cluster/templates/cluster.yaml | 1 + docs/user_docs/cli/kbcli_cluster_create.md | 1 + internal/cli/cluster/kafka.go | 34 +++ .../cli/cmd/cluster/create_subcmds_test.go | 15 +- 28 files changed, 358 insertions(+), 393 deletions(-) create mode 100644 deploy/kafka-cluster/templates/rbac.yaml delete mode 100644 deploy/kafka-cluster/templates/role.yaml delete mode 100644 deploy/kafka-cluster/templates/rolebinding.yaml delete mode 100644 deploy/kafka-cluster/templates/serviceaccount.yaml delete mode 100644 deploy/kafka-cluster/templates/tests/test-connection.yaml create mode 100644 deploy/kafka-cluster/values.schema.json create mode 100644 
deploy/kblib/templates/_storages.tpl create mode 100644 internal/cli/cluster/kafka.go diff --git a/Makefile b/Makefile index e7c777d06e9..8200d4829db 100644 --- a/Makefile +++ b/Makefile @@ -289,10 +289,10 @@ build-single-kbcli-embed-chart.%: .PHONY: build-kbcli-embed-chart build-kbcli-embed-chart: helmtool create-kbcli-embed-charts-dir \ build-single-kbcli-embed-chart.apecloud-mysql-cluster \ - build-single-kbcli-embed-chart.redis-cluster + build-single-kbcli-embed-chart.redis-cluster \ + build-single-kbcli-embed-chart.kafka-cluster # build-single-kbcli-embed-chart.postgresql-cluster \ # build-single-kbcli-embed-chart.clickhouse-cluster \ -# build-single-kbcli-embed-chart.kafka-cluster \ # build-single-kbcli-embed-chart.mongodb-cluster \ # build-single-kbcli-embed-chart.milvus-cluster \ # build-single-kbcli-embed-chart.qdrant-cluster \ diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index b68eed7410b..bfbadb1eb74 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -2064,6 +2064,11 @@ func (in *LastComponentConfiguration) DeepCopyInto(out *LastComponentConfigurati **out = **in } in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements) + if in.ClassDefRef != nil { + in, out := &in.ClassDefRef, &out.ClassDefRef + *out = new(ClassDefRef) + **out = **in + } if in.VolumeClaimTemplates != nil { in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates *out = make([]OpsRequestVolumeClaimTemplate, len(*in)) @@ -2765,6 +2770,7 @@ func (in *ResourceConstraint) DeepCopyInto(out *ResourceConstraint) { *out = *in in.CPU.DeepCopyInto(&out.CPU) in.Memory.DeepCopyInto(&out.Memory) + in.Storage.DeepCopyInto(&out.Storage) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConstraint. 
@@ -3022,6 +3028,31 @@ func (in *StatelessSetSpec) DeepCopy() *StatelessSetSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConstraint) DeepCopyInto(out *StorageConstraint) { + *out = *in + if in.Min != nil { + in, out := &in.Min, &out.Min + x := (*in).DeepCopy() + *out = &x + } + if in.Max != nil { + in, out := &in.Max, &out.Max + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConstraint. +func (in *StorageConstraint) DeepCopy() *StorageConstraint { + if in == nil { + return nil + } + out := new(StorageConstraint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Switchover) DeepCopyInto(out *Switchover) { *out = *in @@ -3366,6 +3397,11 @@ func (in *VerticalScaling) DeepCopyInto(out *VerticalScaling) { *out = *in out.ComponentOps = in.ComponentOps in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements) + if in.ClassDefRef != nil { + in, out := &in.ClassDefRef, &out.ClassDefRef + *out = new(ClassDefRef) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerticalScaling. diff --git a/deploy/apecloud-mysql-cluster/templates/cluster.yaml b/deploy/apecloud-mysql-cluster/templates/cluster.yaml index bdb476924d5..3f72364e71d 100644 --- a/deploy/apecloud-mysql-cluster/templates/cluster.yaml +++ b/deploy/apecloud-mysql-cluster/templates/cluster.yaml @@ -22,6 +22,7 @@ spec: - error serviceAccountName: {{ include "kblib.serviceAccountName" . }} {{- include "kblib.componentResources" . | indent 6 }} + {{- include "kblib.componentStorages" . | indent 6 }} {{- include "kblib.componentServices" . 
| indent 6 }} {{- if and (eq .Values.mode "raftGroup") .Values.proxyEnabled }} {{- include "apecloud-mysql-cluster.proxyComponents" . | indent 4 }} diff --git a/deploy/kafka-cluster/Chart.yaml b/deploy/kafka-cluster/Chart.yaml index a04774c54c0..266f0b6176d 100644 --- a/deploy/kafka-cluster/Chart.yaml +++ b/deploy/kafka-cluster/Chart.yaml @@ -1,13 +1,15 @@ apiVersion: v2 name: kafka-cluster description: A Kafka server cluster Helm chart for KubeBlocks. +dependencies: + - name: kblib + version: 0.1.0 + repository: file://../kblib + alias: extra type: application - version: 0.6.0-alpha.28 - -appVersion: 3.4.0 - +appVersion: 3.3.2 home: https://kubeblocks.io/ icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png diff --git a/deploy/kafka-cluster/templates/cluster.yaml b/deploy/kafka-cluster/templates/cluster.yaml index 61f6597fdf5..5feeb30398a 100644 --- a/deploy/kafka-cluster/templates/cluster.yaml +++ b/deploy/kafka-cluster/templates/cluster.yaml @@ -1,192 +1,116 @@ apiVersion: apps.kubeblocks.io/v1alpha1 kind: Cluster metadata: - name: {{ include "clustername" . }} - labels: {{ include "kafka-cluster.labels" . | nindent 4 }} + name: {{ include "kblib.clusterName" . }} + labels: {{ include "kblib.clusterLabels" . | nindent 4 }} + annotations: + "kubeblocks.io/extra-env": '{"KB_KAFKA_ENABLE_SASL":"{{ $.Values.saslEnable }}"}' spec: clusterDefinitionRef: kafka # ref clusterdefinition.name - clusterVersionRef: kafka-{{ default .Chart.AppVersion .Values.clusterVersionOverride }} - terminationPolicy: {{ $.Values.terminationPolicy }} - affinity: - {{- with $.Values.topologyKeys }} - topologyKeys: {{ . | toYaml | nindent 6 }} - {{- end }} - {{- with $.Values.tolerations }} - tolerations: {{ . | toYaml | nindent 4 }} - {{- end }} + clusterVersionRef: {{ .Values.version }} + terminationPolicy: {{ .Values.extra.terminationPolicy }} + {{- include "kblib.affinity" . 
| indent 2 }} componentSpecs: {{- if eq "combined" $.Values.mode }} - name: broker componentDefRef: kafka-server - tls: {{ $.Values.tls }} - {{- if $.Values.tls }} + tls: {{ $.Values.tlsEnable }} + {{- if $.Values.tlsEnable }} issuer: name: KubeBlocks {{- end }} - replicas: {{ $.Values.kafkaServer.replicaCount }} - monitor: {{ $.Values.monitor.enabled }} - {{- with $.Values.tolerations }} - tolerations: {{ .| toYaml | nindent 8 }} - {{- end }} - {{- with $.Values.kafkaServer.resources }} - resources: - limits: - cpu: {{ .limits.cpu | quote }} - memory: {{ .limits.memory | quote }} - requests: - cpu: {{ .requests.cpu | quote }} - memory: {{ .requests.memory | quote }} - {{- end }} - {{- with $.Values.persistence }} - {{- if .enabled }} + replicas: {{ $.Values.replicas }} + monitor: {{ $.Values.monitorEnable }} + serviceAccountName: {{ include "kblib.serviceAccountName" . }} + {{- include "kblib.componentResources" . | indent 6 }} + {{- include "kblib.componentServices" . | indent 6 }} + {{- if $.Values.storageEnable }} volumeClaimTemplates: - name: metadata spec: - {{- with .metadata.storageClassName }} - storageClassName: {{ . }} - {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ .metadata.size }} + storage: {{ print $.Values.metaStorage "Gi" }} - name: data spec: - {{- with .data.storageClassName }} - storageClassName: {{ . }} - {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ .data.size }} - {{- if .log.enabled }} + storage: {{ print $.Values.dataStorage "Gi" }} - name: log spec: - {{- with .log.storageClassName }} - storageClassName: {{ . 
}} - {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ .log.size }} - {{- end }} + storage: {{ print $.Values.logStorage "Gi" }} {{- end }} - {{- end }} {{- else }} - - name: kafka-kraft - componentDefRef: kafka-kraft - tls: {{ $.Values.tls }} - {{- if $.Values.tls }} + - name: controller + componentDefRef: controller + tls: {{ $.Values.tlsEnable }} + {{- if $.Values.tlsEnable }} issuer: name: KubeBlocks {{- end }} - replicas: {{ $.Values.kafkaController.replicaCount }} - monitor: {{ $.Values.monitor.enabled }} - {{- with $.Values.tolerations }} - tolerations: {{ .| toYaml | nindent 8 }} - {{- end }} - {{- with $.Values.kafkaController.resources }} - resources: - limits: - cpu: {{ .limits.cpu | quote }} - memory: {{ .limits.memory | quote }} - requests: - cpu: {{ .requests.cpu | quote }} - memory: {{ .requests.memory | quote }} - {{- end }} - {{- with $.Values.persistence }} - {{- if .enabled }} + replicas: {{ $.Values.controllerReplicas }} + monitor: {{ $.Values.monitorEnable }} + serviceAccountName: {{ include "kblib.serviceAccountName" . }} + {{- include "kblib.componentResources" . | indent 6 }} + {{- if $.Values.storageEnable }} volumeClaimTemplates: - name: metadata spec: - {{- with .metadata.storageClassName }} - storageClassName: {{ . }} - {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ .metadata.size }} - {{- if .log.enabled }} - - name: log + storage: {{ print $.Values.metaStorage "Gi" }} + - name: log spec: - {{- with .log.storageClassName }} - storageClassName: {{ . 
}} - {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ .log.size }} - {{- end }} - {{- end }} + storage: {{ print $.Values.logStorage "Gi" }} {{- end }} - name: broker componentDefRef: kafka-broker - tls: {{ $.Values.tls }} - {{- if $.Values.tls }} + tls: {{ $.Values.tlsEnable }} + {{- if $.Values.tlsEnable }} issuer: name: KubeBlocks {{- end }} - replicas: {{ $.Values.kafkaBroker.replicaCount }} - monitor: {{ $.Values.monitor.enabled }} - {{- with $.Values.tolerations }} - tolerations: {{ .| toYaml | nindent 8 }} - {{- end }} - {{- with $.Values.kafkaBroker.resources }} - resources: - limits: - cpu: {{ .limits.cpu | quote }} - memory: {{ .limits.memory | quote }} - requests: - cpu: {{ .requests.cpu | quote }} - memory: {{ .requests.memory | quote }} - {{- end }} - {{- with $.Values.persistence }} - {{- if .enabled }} + replicas: {{ $.Values.brokerReplicas }} + monitor: {{ $.Values.monitorEnable }} + serviceAccountName: {{ include "kblib.serviceAccountName" . }} + {{- include "kblib.componentResources" . | indent 6 }} + {{- include "kblib.componentServices" . | indent 6 }} + {{- if $.Values.storageEnable }} volumeClaimTemplates: - name: data spec: - {{- with .data.storageClassName }} - storageClassName: {{ . }} - {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ .data.size }} - {{- if .log.enabled }} - - name: log + storage: {{ print $.Values.dataStorage "Gi" }} + - name: log spec: - {{- with .log.storageClassName }} - storageClassName: {{ .
}} - {{- end }} accessModes: - ReadWriteOnce resources: requests: - storage: {{ .log.size }} - {{- end }} + storage: {{ print $.Values.logStorage "Gi" }} {{- end }} {{- end }} - {{- end }} - {{- if .Values.monitor.enabled }} + {{- if .Values.monitorEnable }} - name: metrics-exp componentDefRef: kafka-exporter - replicas: {{ $.Values.monitor.kafkaExporter.replicaCount }} + replicas: {{ $.Values.monitorReplicas }} monitor: true - {{- with $.Values.tolerations }} - tolerations: {{ .| toYaml | nindent 8 }} - {{- end }} - {{- with $.Values.monitor.kafkaExporter.resources }} - resources: - limits: - cpu: {{ .limits.cpu | quote }} - memory: {{ .limits.memory | quote }} - requests: - cpu: {{ .requests.cpu | quote }} - memory: {{ .requests.memory | quote }} - {{- end }} + {{- include "kblib.componentResources" . | nindent 6 }} {{- end }} \ No newline at end of file diff --git a/deploy/kafka-cluster/templates/rbac.yaml b/deploy/kafka-cluster/templates/rbac.yaml new file mode 100644 index 00000000000..08875e8bf00 --- /dev/null +++ b/deploy/kafka-cluster/templates/rbac.yaml @@ -0,0 +1 @@ +{{- include "kblib.rbac" . }} \ No newline at end of file diff --git a/deploy/kafka-cluster/templates/role.yaml b/deploy/kafka-cluster/templates/role.yaml deleted file mode 100644 index 751e6011d82..00000000000 --- a/deploy/kafka-cluster/templates/role.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: kb-{{ include "clustername" . }} - namespace: {{ .Release.Namespace }} - labels: - {{ include "kafka-cluster.labels" . 
| nindent 4 }} -rules: - - apiGroups: - - "" - resources: - - events - verbs: - - create diff --git a/deploy/kafka-cluster/templates/rolebinding.yaml b/deploy/kafka-cluster/templates/rolebinding.yaml deleted file mode 100644 index dddde6ac65f..00000000000 --- a/deploy/kafka-cluster/templates/rolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kb-{{ include "clustername" . }} - labels: - {{ include "kafka-cluster.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kb-{{ include "clustername" . }} -subjects: - - kind: ServiceAccount - name: {{ include "kafka-cluster.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} diff --git a/deploy/kafka-cluster/templates/serviceaccount.yaml b/deploy/kafka-cluster/templates/serviceaccount.yaml deleted file mode 100644 index b24c1376d5d..00000000000 --- a/deploy/kafka-cluster/templates/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "kafka-cluster.serviceAccountName" . }} - labels: - {{ include "kafka-cluster.labels" . | nindent 4 }} diff --git a/deploy/kafka-cluster/templates/tests/test-connection.yaml b/deploy/kafka-cluster/templates/tests/test-connection.yaml deleted file mode 100644 index b65b2ab607c..00000000000 --- a/deploy/kafka-cluster/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "clustername" . }}-test-connection" - labels: - {{- include "kafka-cluster.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "clustername" . 
}}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/deploy/kafka-cluster/templates/validate.yaml b/deploy/kafka-cluster/templates/validate.yaml index 0558d745961..dda2a77698d 100644 --- a/deploy/kafka-cluster/templates/validate.yaml +++ b/deploy/kafka-cluster/templates/validate.yaml @@ -1,16 +1,22 @@ -{{- if .Values.kafkaServer.replicaCount }} - {{- if gt (int .Values.kafkaServer.replicaCount) 3 }} +{{- if .Values.replicas }} + {{- if gt (int .Values.replicas) 5 }} - {{ fail "kafka-server cluster does not support running replicas greater than 3." }} + {{ fail "kafka-server cluster does not support running replicas greater than 5." }} {{- end }} - {{- if ne (mod (int .Values.kafkaServer.replicaCount) 2) 1 }} + {{- if ne (mod (int .Values.replicas) 2) 1 }} {{ fail "kafka-server cluster does not support running with even number replicas." }} {{- end }} {{- end }} -{{- if .Values.kafkaController.replicaCount }} - {{- if gt (int .Values.kafkaController.replicaCount) 3 }} - {{ fail "kafka-controller cluster does not support running replicas greater than 3." }} + +{{- if .Values.brokerReplicas }} + {{- if gt (int .Values.brokerReplicas) 100 }} + {{ fail "kafka-broker cluster does not support running replicas greater than 100." }} + {{- end }} +{{- end }} +{{- if .Values.controllerReplicas }} + {{- if gt (int .Values.controllerReplicas) 5 }} + {{ fail "kafka-controller cluster does not support running replicas greater than 5." }} {{- end }} - {{- if ne (mod (int .Values.kafkaController.replicaCount) 2) 1 }} + {{- if ne (mod (int .Values.controllerReplicas) 2) 1 }} {{ fail "kafka-controller cluster does not support running with even number replicas."
}} {{- end }} {{- end }} diff --git a/deploy/kafka-cluster/values.schema.json b/deploy/kafka-cluster/values.schema.json new file mode 100644 index 00000000000..92d34fa3186 --- /dev/null +++ b/deploy/kafka-cluster/values.schema.json @@ -0,0 +1,140 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "version": { + "title": "Version", + "description": "Cluster version.", + "type": "string", + "default": "kafka-3.3.2" + }, + "mode": { + "title": "Mode", + "description": "Mode for Kafka kraft cluster, 'combined' is combined Kafka controller and broker,'separated' is broker and controller running independently.", + "type": "string", + "default": "combined", + "enum": [ + "combined", + "separated" + ] + }, + "tlsEnable": { + "title": "TlsEnable", + "description": "Enable TLS for Kafka.", + "type": "boolean", + "default": false + }, + "saslEnable": { + "title": "SaslEnable", + "description": "Enable authentication using SASL/PLAIN for Kafka.", + "type": "boolean", + "default": false + }, + "monitorEnable": { + "title": "MonitorEnable", + "description": "Enable monitor for Kafka.", + "type": "boolean", + "default": false + }, + "storageEnable": { + "title": "StorageEnable", + "description": "Enable storage for Kafka meta/data/log.", + "type": "boolean", + "default": false + }, + "metaStorage": { + "title": "MetaStorage(Gi)", + "description": "Meta Storage size, the unit is Gi.", + "type": [ + "number", + "string" + ], + "default": 5, + "minimum": 1, + "maximum": 10000 + }, + "dataStorage": { + "title": "DataStorage(Gi)", + "description": "Data Storage size, the unit is Gi.", + "type": [ + "number", + "string" + ], + "default": 10, + "minimum": 1, + "maximum": 10000 + }, + "logStorage": { + "title": "LogStorage(Gi)", + "description": "Log Storage size, the unit is Gi.", + "type": [ + "number", + "string" + ], + "default": 2, + "minimum": 1, + "maximum": 10000 + }, + "replicas": { + "title": "Replicas", + "description": "The number of 
Kafka replicas for combined mode.", + "type": "integer", + "default": 1, + "enum": [ + 1, + 3, + 5 + ] + }, + "brokerReplicas": { + "title": "BrokerReplicas", + "description": "The number of Kafka broker replicas for separated mode.", + "type": "integer", + "default": 1, + "minimum": 1, + "maximum": 100 + }, + "controllerReplicas": { + "title": "ControllerReplicas", + "description": "The number of Kafka controller replicas for separated mode.", + "type": "integer", + "default": 1, + "enum": [ + 1, + 3, + 5 + ] + }, + "monitorReplicas": { + "title": "MonitorReplicas", + "description": "The number of Kafka monitor replicas.", + "type": "integer", + "default": 1, + "minimum": 1, + "maximum": 5 + }, + "cpu": { + "title": "CPU", + "description": "CPU cores.", + "type": [ + "number", + "string" + ], + "default": 1, + "minimum": 0.5, + "maximum": 64, + "multipleOf": 0.5 + }, + "memory": { + "title": "Memory(Gi)", + "description": "Memory, the unit is Gi.", + "type": [ + "number", + "string" + ], + "default": 1, + "minimum": 0.5, + "maximum": 1000 + } + } +} diff --git a/deploy/kafka-cluster/values.yaml b/deploy/kafka-cluster/values.yaml index 98474f375a0..6cb17ffe3a8 100644 --- a/deploy/kafka-cluster/values.yaml +++ b/deploy/kafka-cluster/values.yaml @@ -1,199 +1,59 @@ -## @param terminationPolicy define Cluster termination policy. One of DoNotTerminate, Halt, Delete, WipeOut. +## @param version Kafka cluster version ## -terminationPolicy: Halt +version: kafka-3.3.2 -## @param mode for Kafka cluster mode, 'combined' is combined Kafka controller (KRaft) and broker, +## @param mode for Kafka cluster mode, 'combined' is combined Kafka controller (KRaft) and broker, ## 'separated' is a Kafka KRaft and Kafka broker cluster. 
## mode: combined # Enable TLS # Todo: Monitoring is not supported when tls is enabled -tls: false - -## Enable persistence using Persistent Volume Claims -## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ -## -## @param persistence.enabled Enable persistence using Persistent Volume Claims -## @param persistence.data.size Size of data volume -## @param persistence.data.storageClassName Storage class of backing PVC for data volume -## @param persistence.log.size Size of log volume -## @param persistence.log.storageClassName Storage class of backing PVC for log volume -persistence: - ## @param persistence.enabled Enable persistence using Persistent Volume Claims - ## - enabled: true - ## `metadata` volume settings - ## - metadata: - ## @param persistence.metadata.preferStorageClassNames - preferStorageClassNames: - - kafka-meta-eks - - kafka-meta-aks - - kafka-meta-gke - - kafka-meta-ack - - kafka-meta-tke - - ## @param persistence.data.storageClassName Storage class of backing PVC - ## @param persistence.data.storageClassName Storage class of backing PVC - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClassName: - ## @param persistence.data.size Size of data volume - ## - size: 5Gi - ## `data` volume settings - ## - data: - ## @param persistence.data.preferStorageClassNames - preferStorageClassNames: - - kafka-data-eks - - kafka-data-aks - - kafka-data-gke - - kafka-data-ack - - kafka-data-tke - - ## @param persistence.data.storageClassName Storage class of backing PVC - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClassName: - - ## @param persistence.data.size Size of data volume - ## - size: 10Gi - ## `log` volume settings - ## - log: - ## @param persistence.log.enabled Enable persistence using Persistent Volume Claims - ## - enabled: false - - ## @param persistence.log.preferStorageClassNames - preferStorageClassNames: - - kafka-eks-standard - - kafka-aks-standard - - kafka-gke-standard - - kafka-ack-standard - - kafka-tke-standard - - ## @param persistence.data.storageClassName Storage class of backing PVC - ## @param persistence.log.storageClassName Storage class of backing PVC - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClassName: - ## @param persistence.log.size Size of data volume - ## - size: 5Gi - -## kafkaServer (kraft & broker) component settings. -## -kafkaServer: - ## @param replicaCount Kafka server replica count - ## - replicaCount: 1 - - - ## Kafka server workload pod resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## @param resources.limits The resources limits for the pod - ## @param resources.requests The requested resources for pod - ## - resources: {} +tlsEnable: false +# Enable SASL +saslEnable: false -## kafkaController (kraft) component settings. -## -kafkaController: - ## @param replicaCount Kafka server replica count - ## - replicaCount: 1 - - - ## Kafka server workload pod resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## @param resources.limits The resources limits for the pod - ## @param resources.requests The requested resources for pod - ## - resources: {} +# Enable Monitor +monitorEnable: false - -## kafkaBroker component settings. 
+## @param cpu ## -kafkaBroker: - ## @param replicaCount Kafka server replica count - ## - replicaCount: 1 - - - ## Kafka server workload pod resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## @param resources.limits The resources limits for the pod - ## @param resources.requests The requested resources for pod - ## - resources: {} +cpu: 0.5 - -## Monitoring configurations +## @param memory, the unit is Gi ## -monitor: - ## @param monitor.enabled if `true`, enable Cluster monitor capabilities - ## - enabled: true - ## Kafka exporter settings - ## - kafkaExporter: - ## Kafka exporter pod resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## @param resources.limits The resources limits for the pod - ## @param resources.requests The requested resources for pod - ## - resources: {} - ## @param replicaCount Kafka exporter replica count - ## - replicaCount: 1 - +memory: 1 -## Service configurations +## @param requests.cpu if not set, use cpu +## @param requests.memory, if not set, use memory ## -service: - ## @param service.type Kafka cluster service type, valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types - ## - type: ClusterIP +requests: +# cpu: +# memory: +storageEnable: true -## @param tolerations define global Tolerations for the cluster all pod's assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] +## kafka metadata storage setting,the unit is Gi +metaStorage: 5 +## kafka data storage setting,the unit is Gi +dataStorage: 10 -## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains.
Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods -## -topologyKeys: - - kubernetes.io/hostname +## kafka log storage setting,the unit is Gi +logStorage: 2 +## kafka replicas for combined mode +replicas: 1 -## @param affinity is affinity setting for Kafka cluster pods assignment -## -affinity: {} +## kafka broker replicas +## only effective when clusterMode='separated' +brokerReplicas: 1 -nameOverride: "" -fullnameOverride: "" +## kafka controller replicas +## only effective when clusterMode='separated' +controllerReplicas: 1 -# The RBAC permission used by cluster component pod, now include event.create -serviceAccount: - name: "" +## kafka monitor component replicas +## only effective when monitorEnable=true +monitorReplicas: 1 diff --git a/deploy/kafka/Chart.yaml b/deploy/kafka/Chart.yaml index c9647cb3dc4..2268adf6152 100644 --- a/deploy/kafka/Chart.yaml +++ b/deploy/kafka/Chart.yaml @@ -13,7 +13,7 @@ type: application version: 0.6.0-alpha.28 -appVersion: 3.4.0 +appVersion: 3.3.2 home: https://kubeblocks.io/ icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png diff --git a/deploy/kafka/configs/kafka-server-constraint.cue b/deploy/kafka/configs/kafka-server-constraint.cue index b2c896dba81..24684904527 100644 --- a/deploy/kafka/configs/kafka-server-constraint.cue +++ b/deploy/kafka/configs/kafka-server-constraint.cue @@ -72,7 +72,7 @@ "metadata.log.max.record.bytes.between.snapshots"?: int & >=1 // This is the maximum number of milliseconds to wait to generate a snapshot if there are committed records in the log that are not included in the latest snapshot. - "metadata.log.max.snapshot.interval.ms"?: int & >=0 + // "metadata.log.max.snapshot.interval.ms"?: int & >=0 // The length of time in milliseconds between broker heartbeats. Used when running in KRaft mode. 
"broker.heartbeat.interval.ms"?: int @@ -399,7 +399,7 @@ "transaction.remove.expired.transaction.cleanup.interval.ms"?: int & >=1 // The time in ms that a topic partition leader will wait before expiring producer IDs. - "producer.id.expiration.ms"?: int & >=1 + // "producer.id.expiration.ms"?: int & >=1 // The maximum number of incremental fetch sessions that we will maintain. "max.incremental.fetch.session.cache.slots"?: int & >=0 diff --git a/deploy/kafka/configs/kafka-server.prop.tpl b/deploy/kafka/configs/kafka-server.prop.tpl index b3ae8cfc6ae..121e9bfd11b 100644 --- a/deploy/kafka/configs/kafka-server.prop.tpl +++ b/deploy/kafka/configs/kafka-server.prop.tpl @@ -1,4 +1,4 @@ -# broker config. generate from https://github.com/apache/kafka/blob/3.4/core/src/main/scala/kafka/server/KafkaConfig.scala#L1127 +# broker config. generate according to https://github.com/apache/kafka/blob/3.3.2/core/src/main/scala/kafka/server/KafkaConfig.scala#L1095 # Default Topic Configuration log.segment.bytes=1073741824 @@ -74,7 +74,6 @@ request.timeout.ms=30000 socket.connection.setup.timeout.ms=10000 socket.connection.setup.timeout.max.ms=30000 metadata.log.max.record.bytes.between.snapshots=20971520 -metadata.log.max.snapshot.interval.ms=3600000 broker.heartbeat.interval.ms=2000 broker.session.timeout.ms=9000 sasl.mechanism.controller.protocol=GSSAPI @@ -146,15 +145,12 @@ transaction.state.log.num.partitions=50 transaction.state.log.segment.bytes=104857600 transaction.abort.timed.out.transaction.cleanup.interval.ms=10000 transaction.remove.expired.transaction.cleanup.interval.ms=3600000 -producer.id.expiration.ms=86400000 -# producer.id.expiration.check.interval.ms=600000 max.incremental.fetch.session.cache.slots=1000 fetch.max.bytes=57671680 metrics.num.samples=2 metrics.sample.window.ms=30000 # metric.reporters= metrics.recording.level=INFO -# auto.include.jmx.reporter=true ## will deprecated in Kafka4.0, use metric.reporters instead # kafka.metrics.reporters= 
kafka.metrics.polling.interval.secs=10 quota.window.num=11 @@ -281,6 +277,5 @@ allow.everyone.if.no.acl.found=true # zookeeper.ssl.endpoint.identification.algorithm=HTTPS # zookeeper.ssl.crl.enable=false # zookeeper.ssl.ocsp.enable=false -# zookeeper.metadata.migration.enable=false # end (DON'T REMOVE THIS LINE) \ No newline at end of file diff --git a/deploy/kafka/scripts/kafka-sasl-sample.prop.tpl b/deploy/kafka/scripts/kafka-sasl-sample.prop.tpl index 9c3b029388b..c6b14742114 100644 --- a/deploy/kafka/scripts/kafka-sasl-sample.prop.tpl +++ b/deploy/kafka/scripts/kafka-sasl-sample.prop.tpl @@ -5,8 +5,8 @@ KafkaClient { }; KafkaServer { org.apache.kafka.common.security.plain.PlainLoginModule required - username="user" + username="admin" password="kubeblocks" - user_user="kubeblocks" + user_admin="kubeblocks" user_client="kubeblocks"; }; \ No newline at end of file diff --git a/deploy/kafka/scripts/kafka-server-setup.sh.tpl b/deploy/kafka/scripts/kafka-server-setup.sh.tpl index 77893004b2e..5b078cfc537 100644 --- a/deploy/kafka/scripts/kafka-server-setup.sh.tpl +++ b/deploy/kafka/scripts/kafka-server-setup.sh.tpl @@ -113,10 +113,10 @@ if [[ "broker" = "$KAFKA_CFG_PROCESS_ROLES" ]]; then # generate KAFKA_CFG_CONTROLLER_QUORUM_VOTERS for broker if not a combine-cluster {{- $voters := "" }} {{- range $i, $c := $.cluster.spec.componentSpecs }} - {{- if eq "kafka-controller" $c.componentDefRef }} + {{- if eq "controller" $c.componentDefRef }} {{- $replicas := $c.replicas | int }} {{- range $n, $e := until $replicas }} - {{- $podFQDN := printf "%s-%s-%d.%s-%s-headless.%s.svc.cluster.local" $clusterName $c.name $n $clusterName $c.name $namespace }} # Todo: cluster.local + {{- $podFQDN := printf "%s-%s-%d.%s-%s-headless.%s.svc.cluster.local" $clusterName $c.name $n $clusterName $c.name $namespace }} # {{- $voter := printf "%d@%s:9093" ( $n | int ) $podFQDN }} {{- $voters = printf "%s,%s" $voters $voter }} {{- end }} diff --git a/deploy/kafka/templates/NOTES.txt 
b/deploy/kafka/templates/NOTES.txt index 7146f9e1c0f..621410cbc44 100644 --- a/deploy/kafka/templates/NOTES.txt +++ b/deploy/kafka/templates/NOTES.txt @@ -4,4 +4,4 @@ APP VERSION: {{ .Chart.AppVersion }} KubeBlocks Kafka server cluster definition, start create your Kafka Server Cluster with following command: - kbcli cluster create --cluster-definition=kafka + kbcli cluster create kafka diff --git a/deploy/kafka/templates/clusterdefinition.yaml b/deploy/kafka/templates/clusterdefinition.yaml index b2c54314b36..f203b604dad 100644 --- a/deploy/kafka/templates/clusterdefinition.yaml +++ b/deploy/kafka/templates/clusterdefinition.yaml @@ -29,7 +29,6 @@ spec: deployment environments. workloadType: Stateful # Consensus characterType: kafka - maxUnavailable: 49% probes: monitor: builtIn: false @@ -127,8 +126,8 @@ spec: name: $(CONN_CREDENTIAL_SECRET_NAME) key: kraftClusterID optional: false - - name: KB_KAFKA_ENABLE_SASL # enable the SASL with plain mode - value: "false" +{{/* - name: KB_KAFKA_ENABLE_SASL # enable the SASL with plain mode*/}} +{{/* value: "false"*/}} - name: KB_KAFKA_SASL_CONFIG_PATH # specify the SASL jaas users value: /tools/server-jaas.properties ports: @@ -192,12 +191,11 @@ spec: mountPath: /etc/jmx-kafka # controller(kraft) Ref: https://kafka.apache.org/documentation/#kraft_role - - name: kafka-kraft + - name: controller description: |- Kafka controller that act as controllers (kraft) only server. workloadType: Stateful # Consensus characterType: kafka - maxUnavailable: 49% probes: monitor: builtIn: false @@ -213,6 +211,7 @@ spec: - name: kafka-jmx-configuration-tpl templateRef: {{ include "kafka.name" . }}-jmx-configuration-tpl volumeName: jmx-config + namespace: {{ .Release.Namespace }} scriptSpecs: - name: kafka-scripts-tpl templateRef: {{ include "kafka.name" . }}-scripts-tpl @@ -351,6 +350,7 @@ spec: - name: kafka-jmx-configuration-tpl templateRef: {{ include "kafka.name" . 
}}-jmx-configuration-tpl volumeName: jmx-config + namespace: {{ .Release.Namespace }} scriptSpecs: - name: kafka-scripts-tpl templateRef: {{ include "kafka.name" . }}-scripts-tpl @@ -428,8 +428,8 @@ spec: name: $(CONN_CREDENTIAL_SECRET_NAME) key: kraftClusterID optional: false - - name: KB_KAFKA_ENABLE_SASL # enable the SASL with plain mode - value: "true" +{{/* - name: KB_KAFKA_ENABLE_SASL # enable the SASL with plain mode*/}} +{{/* value: "true"*/}} - name: KB_KAFKA_SASL_CONFIG_PATH # specify the SASL jaas users value: /tools/server-jaas.properties - name: BROKER_MIN_NODE_ID diff --git a/deploy/kafka/templates/clusterversion.yaml b/deploy/kafka/templates/clusterversion.yaml index 602f77d524a..262d4524d99 100644 --- a/deploy/kafka/templates/clusterversion.yaml +++ b/deploy/kafka/templates/clusterversion.yaml @@ -29,7 +29,7 @@ spec: securityContext: runAsNonRoot: true runAsUser: 1001 - - componentDefRef: kafka-kraft + - componentDefRef: controller versionsContext: containers: - name: kafka diff --git a/deploy/kafka/values.yaml b/deploy/kafka/values.yaml index 0e77b385b33..4e4844c0238 100644 --- a/deploy/kafka/values.yaml +++ b/deploy/kafka/values.yaml @@ -17,7 +17,8 @@ images: pullPolicy: IfNotPresent kafka: repository: docker.io/bitnami/kafka - tag: 3.4.0-debian-11-r22 +# tag: 3.4.0-debian-11-r22 + tag: 3.3.2-debian-11-r54 kafkaExporter: repository: docker.io/bitnami/kafka-exporter tag: 1.6.0-debian-11-r67 diff --git a/deploy/kblib/templates/_resources.tpl b/deploy/kblib/templates/_resources.tpl index 56ae3825bbb..ee76b04dbc0 100644 --- a/deploy/kblib/templates/_resources.tpl +++ b/deploy/kblib/templates/_resources.tpl @@ -1,5 +1,5 @@ {{/* -Define component resources, including cpu, memory and storage +Define component resources, including cpu, memory */}} {{- define "kblib.componentResources" }} {{- $requestCPU := (float64 .Values.cpu) }} @@ -19,12 +19,4 @@ resources: requests: cpu: {{ $requestCPU | quote }} memory: {{ print $requestMemory "Gi" | quote }} 
-volumeClaimTemplates: - - name: data # ref clusterDefinition components.containers.volumeMounts.name - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ print .Values.storage "Gi" }} {{- end }} \ No newline at end of file diff --git a/deploy/kblib/templates/_storages.tpl b/deploy/kblib/templates/_storages.tpl new file mode 100644 index 00000000000..b2304b46dc3 --- /dev/null +++ b/deploy/kblib/templates/_storages.tpl @@ -0,0 +1,13 @@ +{{/* +Define component storages, including volumeClaimTemplates +*/}} +{{- define "kblib.componentStorages" }} +volumeClaimTemplates: + - name: data # ref clusterDefinition components.containers.volumeMounts.name + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ print .Values.storage "Gi" }} +{{- end }} \ No newline at end of file diff --git a/deploy/tdengine-cluster/templates/cluster.yaml b/deploy/tdengine-cluster/templates/cluster.yaml index 99a1b61879a..c7e92829709 100644 --- a/deploy/tdengine-cluster/templates/cluster.yaml +++ b/deploy/tdengine-cluster/templates/cluster.yaml @@ -15,4 +15,5 @@ spec: {{- include "tdengine-cluster.replicaCount" . | indent 6 }} serviceAccountName: {{ include "kblib.serviceAccountName" . }} {{- include "kblib.componentResources" . | indent 6 }} + {{- include "kblib.componentStorages" . | indent 6 }} {{- include "kblib.componentServices" . | indent 6 }} \ No newline at end of file diff --git a/docs/user_docs/cli/kbcli_cluster_create.md b/docs/user_docs/cli/kbcli_cluster_create.md index 4634f36352d..ea824b9ba40 100644 --- a/docs/user_docs/cli/kbcli_cluster_create.md +++ b/docs/user_docs/cli/kbcli_cluster_create.md @@ -138,6 +138,7 @@ kbcli cluster create [NAME] [flags] ### SEE ALSO * [kbcli cluster](kbcli_cluster.md) - Cluster command. +* [kbcli cluster create kafka](kbcli_cluster_create_kafka.md) - Create a kafka cluster. * [kbcli cluster create mysql](kbcli_cluster_create_mysql.md) - Create a mysql cluster. 
* [kbcli cluster create redis](kbcli_cluster_create_redis.md) - Create a redis cluster. diff --git a/internal/cli/cluster/kafka.go b/internal/cli/cluster/kafka.go new file mode 100644 index 00000000000..d622ce6e59d --- /dev/null +++ b/internal/cli/cluster/kafka.go @@ -0,0 +1,34 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package cluster + +import ( + "embed" +) + +var ( + // run `make generate` to generate this embed file + //go:embed charts/kafka-cluster.tgz + kafkaChart embed.FS +) + +func init() { + registerClusterType("kafka", kafkaChart, "kafka-cluster.tgz") +} diff --git a/internal/cli/cmd/cluster/create_subcmds_test.go b/internal/cli/cmd/cluster/create_subcmds_test.go index 87dd6f1cdaa..a325c9ebcf3 100644 --- a/internal/cli/cmd/cluster/create_subcmds_test.go +++ b/internal/cli/cmd/cluster/create_subcmds_test.go @@ -24,6 +24,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -43,7 +44,7 @@ import ( "github.com/apecloud/kubeblocks/internal/cli/types" ) -var _ = Describe("create cluster by clusterType type", func() { +var _ = Describe("create cluster by cluster type", func() { const ( clusterType = "mysql" ) @@ -81,7 +82,7 @@ var _ = Describe("create cluster by clusterType type", func() { tf.Cleanup() }) - It("cluster sub command", func() { + It("create mysql cluster command", func() { By("create commands") cmds := buildCreateSubCmds(createOptions) Expect(cmds).ShouldNot(BeNil()) @@ -94,10 +95,16 @@ var _ = Describe("create cluster by clusterType type", func() { Expect(o.chartInfo).ShouldNot(BeNil()) By("complete") - cmd := cmds[0] + var mysqlCmd *cobra.Command + for _, c := range cmds { + if c.Name() == clusterType { + mysqlCmd = c + break + } + } o.Format = printer.YAML Expect(o.CreateOptions.Complete()).Should(Succeed()) - Expect(o.complete(cmd, nil)).Should(Succeed()) + Expect(o.complete(mysqlCmd, nil)).Should(Succeed()) Expect(o.Name).ShouldNot(BeEmpty()) Expect(o.values).ShouldNot(BeNil())