From a0ec15564ac807b7dbb1d8a203fe7903ad86f2d3 Mon Sep 17 00:00:00 2001 From: free6om Date: Sun, 25 Jun 2023 12:46:16 +0800 Subject: [PATCH] refactor: ReplicatedStateMachine (#3733) 1. Rename the combined component (ConsensusSet & ReplicationSet) to RSM (Replicated State Machine). 2. Make package test coverage greater than 80.0%. --- PROJECT | 2 +- ...pes.go => replicatedstatemachine_types.go} | 61 +-- ...k.go => replicatedstatemachine_webhook.go} | 32 +- ...=> replicatedstatemachine_webhook_test.go} | 26 +- apis/workloads/v1alpha1/webhook_suite_test.go | 2 +- cmd/manager/main.go | 20 +- ...kubeblocks.io_replicatedstatemachines.yaml | 39 +- config/crd/kustomization.yaml | 6 +- ...in_workloads_replicatedstatemachines.yaml} | 2 +- ...in_workloads_replicatedstatemachines.yaml} | 2 +- config/rbac/role.yaml | 6 +- ...s_replicatedstatemachine_editor_role.yaml} | 10 +- ...s_replicatedstatemachine_viewer_role.yaml} | 10 +- ...oads_v1alpha1_replicatedstatemachine.yaml} | 8 +- config/webhook/manifests.yaml | 12 +- controllers/k8score/event_controller.go | 4 +- controllers/k8score/event_controller_test.go | 189 +++++---- controllers/k8score/suite_test.go | 4 + ...o => replicatedstatemachine_controller.go} | 45 +- ...replicatedstatemachine_controller_test.go} | 16 +- controllers/workloads/suite_test.go | 2 +- deploy/helm/config/rbac/role.yaml | 6 +- ...kubeblocks.io_replicatedstatemachines.yaml | 39 +- .../admission/webhookconfiguration.yaml | 48 +++ ...ds_replicatedstatemachine_editor_role.yaml | 31 ++ ...ds_replicatedstatemachine_viewer_role.yaml | 27 ++ internal/controller/builder/builder_base.go | 6 + .../controller/builder/builder_base_test.go | 8 +- internal/controller/builder/builder_event.go | 54 +++ .../controller/builder/builder_event_test.go | 61 +++ .../suite_test.go => builder/builder_pvc.go} | 35 +- .../controller/builder/builder_pvc_test.go | 49 +++ ...go => builder_replicated_state_machine.go} | 36 +- ...
builder_replicated_state_machine_test.go} | 44 +- internal/controller/graph/dag.go | 45 +- internal/controller/graph/dag_test.go | 87 ++++ internal/controller/model/transform_types.go | 10 - internal/controller/rsm/doc.go | 33 ++ .../{consensusset => rsm}/enqueue_ancestor.go | 89 +++- .../controller/rsm/enqueue_ancestor_test.go | 399 ++++++++++++++++++ .../{consensusset => rsm}/plan_builder.go | 64 +-- internal/controller/rsm/plan_builder_test.go | 215 ++++++++++ .../pod_role_event_handler.go | 14 +- .../rsm/pod_role_event_handler_test.go | 149 +++++++ internal/controller/rsm/suite_test.go | 228 ++++++++++ .../transformer_deletion.go | 19 +- .../rsm/transformer_deletion_test.go | 128 ++++++ .../transformer_fix_meta.go | 16 +- .../rsm/transformer_fix_meta_test.go | 72 ++++ .../{consensusset => rsm}/transformer_init.go | 14 +- .../controller/rsm/transformer_init_test.go | 64 +++ .../transformer_member_reconfiguration.go | 182 ++++---- ...transformer_member_reconfiguration_test.go | 283 +++++++++++++ .../transformer_object_generation.go | 144 +++---- .../transformer_objection_generation_test.go | 110 +++++ .../transformer_status.go | 50 +-- .../controller/rsm/transformer_status_test.go | 151 +++++++ .../transformer_update_strategy.go | 60 +-- .../rsm/transformer_update_strategy_test.go | 266 ++++++++++++ .../controller/{consensusset => rsm}/types.go | 49 ++- .../{consensusset => rsm}/update_plan.go | 35 +- internal/controller/rsm/update_plan_test.go | 157 +++++++ .../controller/{consensusset => rsm}/utils.go | 129 +++--- internal/controller/rsm/utils_test.go | 324 ++++++++++++++ internal/testutil/k8s/mocks/generate.go | 2 +- 65 files changed, 3786 insertions(+), 744 deletions(-) rename apis/workloads/v1alpha1/{consensusset_types.go => replicatedstatemachine_types.go} (85%) rename apis/workloads/v1alpha1/{consensusset_webhook.go => replicatedstatemachine_webhook.go} (65%) rename apis/workloads/v1alpha1/{consensusset_webhook_test.go => replicatedstatemachine_webhook_test.go} (80%) rename deploy/helm/crds/workloads.kubeblocks.io_consensussets.yaml => config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml (99%) rename config/crd/patches/{cainjection_in_workloads_consensussets.yaml => cainjection_in_workloads_replicatedstatemachines.yaml} (81%) rename config/crd/patches/{webhook_in_workloads_consensussets.yaml => webhook_in_workloads_replicatedstatemachines.yaml} (86%) rename config/rbac/{workloads_consensusset_editor_role.yaml => workloads_replicatedstatemachine_editor_role.yaml} (68%) rename config/rbac/{workloads_consensusset_viewer_role.yaml => workloads_replicatedstatemachine_viewer_role.yaml} (65%) rename config/samples/{workloads_v1alpha1_consensusset.yaml => workloads_v1alpha1_replicatedstatemachine.yaml} (56%) rename controllers/workloads/{consensusset_controller.go => replicatedstatemachine_controller.go} (72%) rename controllers/workloads/{consensusset_controller_test.go => replicatedstatemachine_controller_test.go} (83%) rename config/crd/bases/workloads.kubeblocks.io_consensussets.yaml => deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml (99%) create mode 100644 deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml create mode 100644 deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml create mode 100644 internal/controller/builder/builder_event.go create mode 100644 internal/controller/builder/builder_event_test.go rename internal/controller/{consensusset/suite_test.go => builder/builder_pvc.go} (60%) 
create mode 100644 internal/controller/builder/builder_pvc_test.go rename internal/controller/builder/{builder_consensus_set.go => builder_replicated_state_machine.go} (50%) rename internal/controller/builder/{builder_consensus_set_test.go => builder_replicated_state_machine_test.go} (59%) create mode 100644 internal/controller/rsm/doc.go rename internal/controller/{consensusset => rsm}/enqueue_ancestor.go (78%) create mode 100644 internal/controller/rsm/enqueue_ancestor_test.go rename internal/controller/{consensusset => rsm}/plan_builder.go (76%) create mode 100644 internal/controller/rsm/plan_builder_test.go rename internal/controller/{consensusset => rsm}/pod_role_event_handler.go (94%) create mode 100644 internal/controller/rsm/pod_role_event_handler_test.go create mode 100644 internal/controller/rsm/suite_test.go rename internal/controller/{consensusset => rsm}/transformer_deletion.go (76%) create mode 100644 internal/controller/rsm/transformer_deletion_test.go rename internal/controller/{consensusset => rsm}/transformer_fix_meta.go (85%) create mode 100644 internal/controller/rsm/transformer_fix_meta_test.go rename internal/controller/{consensusset => rsm}/transformer_init.go (82%) create mode 100644 internal/controller/rsm/transformer_init_test.go rename internal/controller/{consensusset => rsm}/transformer_member_reconfiguration.go (63%) create mode 100644 internal/controller/rsm/transformer_member_reconfiguration_test.go rename internal/controller/{consensusset => rsm}/transformer_object_generation.go (72%) create mode 100644 internal/controller/rsm/transformer_objection_generation_test.go rename internal/controller/{consensusset => rsm}/transformer_status.go (54%) create mode 100644 internal/controller/rsm/transformer_status_test.go rename internal/controller/{consensusset => rsm}/transformer_update_strategy.go (72%) create mode 100644 internal/controller/rsm/transformer_update_strategy_test.go rename internal/controller/{consensusset => rsm}/types.go (61%) rename internal/controller/{consensusset => rsm}/update_plan.go (88%) create mode 100644 internal/controller/rsm/update_plan_test.go rename internal/controller/{consensusset => rsm}/utils.go (73%) create mode 100644 internal/controller/rsm/utils_test.go diff --git a/PROJECT b/PROJECT index 1d0d5e57941..ccd19312080 100644 --- a/PROJECT +++ b/PROJECT @@ -145,7 +145,7 @@ resources: controller: true domain: kubeblocks.io group: workloads - kind: ConsensusSet + kind: ReplicatedStateMachine path: github.com/apecloud/kubeblocks/apis/workloads/v1alpha1 version: v1alpha1 webhooks: diff --git a/apis/workloads/v1alpha1/consensusset_types.go b/apis/workloads/v1alpha1/replicatedstatemachine_types.go similarity index 85% rename from apis/workloads/v1alpha1/consensusset_types.go rename to apis/workloads/v1alpha1/replicatedstatemachine_types.go index b3a9164d489..85838475123 100644 --- a/apis/workloads/v1alpha1/consensusset_types.go +++ b/apis/workloads/v1alpha1/replicatedstatemachine_types.go @@ -28,8 +28,8 @@ import ( // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
-// ConsensusSetSpec defines the desired state of ConsensusSet -type ConsensusSetSpec struct { +// ReplicatedStateMachineSpec defines the desired state of ReplicatedStateMachine +type ReplicatedStateMachineSpec struct { // Replicas defines number of Pods // +kubebuilder:default=1 // +kubebuilder:validation:Minimum=0 @@ -46,7 +46,7 @@ type ConsensusSetSpec struct { Template corev1.PodTemplateSpec `json:"template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. - // The ConsensusSet controller is responsible for mapping network identities to + // The ReplicatedStateMachine controller is responsible for mapping network identities to // claims in a way that maintains the identity of a pod. Every claim in // this list must have at least one matching (by name) volumeMount in one // container in the template. A claim in this list takes precedence over @@ -54,9 +54,9 @@ type ConsensusSetSpec struct { // +optional VolumeClaimTemplates []corev1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` - // Roles, a list of roles defined in this consensus system. + // Roles, a list of roles defined in the system. // +kubebuilder:validation:Required - Roles []ConsensusRole `json:"roles"` + Roles []ReplicaRole `json:"roles"` // RoleObservation provides method to observe role. // +kubebuilder:validation:Required @@ -82,8 +82,8 @@ type ConsensusSetSpec struct { Credential *Credential `json:"credential,omitempty"` } -// ConsensusSetStatus defines the observed state of ConsensusSet -type ConsensusSetStatus struct { +// ReplicatedStateMachineStatus defines the observed state of ReplicatedStateMachine +type ReplicatedStateMachineStatus struct { appsv1.StatefulSetStatus `json:",inline"` // InitReplicas is the number of pods(members) when cluster first initialized @@ -97,36 +97,36 @@ type ConsensusSetStatus struct { // members' status. // +optional - MembersStatus []ConsensusMemberStatus `json:"membersStatus,omitempty"` + MembersStatus []MemberStatus `json:"membersStatus,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:categories={kubeblocks,all},shortName=csset +// +kubebuilder:resource:categories={kubeblocks,all},shortName=rsm // +kubebuilder:printcolumn:name="LEADER",type="string",JSONPath=".status.membersStatus[?(@.role.isLeader==true)].podName",description="leader pod name." // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.readyReplicas",description="ready replicas." // +kubebuilder:printcolumn:name="REPLICAS",type="string",JSONPath=".status.replicas",description="total replicas." 
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// ConsensusSet is the Schema for the consensussets API -type ConsensusSet struct { +// ReplicatedStateMachine is the Schema for the replicatedstatemachines API +type ReplicatedStateMachine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ConsensusSetSpec `json:"spec,omitempty"` - Status ConsensusSetStatus `json:"status,omitempty"` + Spec ReplicatedStateMachineSpec `json:"spec,omitempty"` + Status ReplicatedStateMachineStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true -// ConsensusSetList contains a list of ConsensusSet -type ConsensusSetList struct { +// ReplicatedStateMachineList contains a list of ReplicatedStateMachine +type ReplicatedStateMachineList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []ConsensusSet `json:"items"` + Items []ReplicatedStateMachine `json:"items"` } -type ConsensusRole struct { +type ReplicaRole struct { // Name, role name. // +kubebuilder:validation:Required // +kubebuilder:default=leader @@ -175,9 +175,9 @@ type RoleObservation struct { // after all actions done, the final output should be a single string of the role name defined in spec.Roles // latest [BusyBox](https://busybox.net/) image will be used if Image not configured // Environment variables can be used in Command: - // - v_KB_CONSENSUS_SET_LAST_STDOUT stdout from last action, watch 'v_' prefixed - // - KB_CONSENSUS_SET_USERNAME username part of credential - // - KB_CONSENSUS_SET_PASSWORD password part of credential + // - v_KB_RSM_LAST_STDOUT stdout from last action, watch 'v_' prefixed + // - KB_RSM_USERNAME username part of credential + // - KB_RSM_PASSWORD password part of credential // +kubebuilder:validation:Required ObservationActions []Action `json:"observationActions"` @@ -201,6 +201,7 @@ type RoleObservation struct { // +optional PeriodSeconds int32 `json:"periodSeconds,omitempty"` + // Minimum consecutive successes for the observation to be considered successful after having failed. // Minimum consecutive successes for the observation to be considered successful after having failed. // Defaults to 1. Minimum value is 1. 
// +kubebuilder:default=1 @@ -218,12 +219,12 @@ type RoleObservation struct { type Credential struct { // Username - // variable name will be KB_CONSENSUS_SET_USERNAME + // variable name will be KB_RSM_USERNAME // +kubebuilder:validation:Required Username CredentialVar `json:"username"` // Password - // variable name will be KB_CONSENSUS_SET_PASSWORD + // variable name will be KB_RSM_PASSWORD // +kubebuilder:validation:Required Password CredentialVar `json:"password"` } @@ -250,11 +251,11 @@ type CredentialVar struct { type MembershipReconfiguration struct { // Environment variables can be used in all following Actions: - // - KB_CONSENSUS_SET_USERNAME username part of credential - // - KB_CONSENSUS_SET_PASSWORD password part of credential - // - KB_CONSENSUS_SET_LEADER_HOST leader host - // - KB_CONSENSUS_SET_TARGET_HOST target host - // - KB_CONSENSUS_SET_SERVICE_PORT port + // - KB_RSM_USERNAME username part of credential + // - KB_RSM_PASSWORD password part of credential + // - KB_RSM_LEADER_HOST leader host + // - KB_RSM_TARGET_HOST target host + // - KB_RSM_SERVICE_PORT port // SwitchoverAction specifies how to do switchover // latest [BusyBox](https://busybox.net/) image will be used if Image not configured @@ -292,15 +293,15 @@ type Action struct { Command []string `json:"command"` } -type ConsensusMemberStatus struct { +type MemberStatus struct { // PodName pod name. // +kubebuilder:validation:Required // +kubebuilder:default=Unknown PodName string `json:"podName"` - ConsensusRole `json:"role"` + ReplicaRole `json:"role"` } func init() { - SchemeBuilder.Register(&ConsensusSet{}, &ConsensusSetList{}) + SchemeBuilder.Register(&ReplicatedStateMachine{}, &ReplicatedStateMachineList{}) } diff --git a/apis/workloads/v1alpha1/consensusset_webhook.go b/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go similarity index 65% rename from apis/workloads/v1alpha1/consensusset_webhook.go rename to apis/workloads/v1alpha1/replicatedstatemachine_webhook.go index b7e575e2beb..f4eb9cab5bf 100644 --- a/apis/workloads/v1alpha1/consensusset_webhook.go +++ b/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go @@ -30,9 +30,9 @@ import ( ) // log is for logging in this package. -var consensussetlog = logf.Log.WithName("consensusset-resource") +var replicatedstatemachinelog = logf.Log.WithName("replicatedstatemachine-resource") -func (r *ConsensusSet) SetupWebhookWithManager(mgr ctrl.Manager) error { +func (r *ReplicatedStateMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). For(r). Complete() @@ -40,44 +40,44 @@ func (r *ConsensusSet) SetupWebhookWithManager(mgr ctrl.Manager) error { // TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
-//+kubebuilder:webhook:path=/mutate-workloads-kubeblocks-io-v1alpha1-consensusset,mutating=true,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=consensussets,verbs=create;update,versions=v1alpha1,name=mconsensusset.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/mutate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine,mutating=true,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=replicatedstatemachines,verbs=create;update,versions=v1alpha1,name=mreplicatedstatemachine.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &ConsensusSet{} +var _ webhook.Defaulter = &ReplicatedStateMachine{} // Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *ConsensusSet) Default() { - consensussetlog.Info("default", "name", r.Name) +func (r *ReplicatedStateMachine) Default() { + replicatedstatemachinelog.Info("default", "name", r.Name) // TODO(user): fill in your defaulting logic. } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -//+kubebuilder:webhook:path=/validate-workloads-kubeblocks-io-v1alpha1-consensusset,mutating=false,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=consensussets,verbs=create;update,versions=v1alpha1,name=vconsensusset.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/validate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine,mutating=false,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=replicatedstatemachines,verbs=create;update,versions=v1alpha1,name=vreplicatedstatemachine.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &ConsensusSet{} +var _ webhook.Validator = &ReplicatedStateMachine{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ConsensusSet) ValidateCreate() error { - consensussetlog.Info("validate create", "name", r.Name) +func (r *ReplicatedStateMachine) ValidateCreate() error { + replicatedstatemachinelog.Info("validate create", "name", r.Name) return r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ConsensusSet) ValidateUpdate(old runtime.Object) error { - consensussetlog.Info("validate update", "name", r.Name) +func (r *ReplicatedStateMachine) ValidateUpdate(old runtime.Object) error { + replicatedstatemachinelog.Info("validate update", "name", r.Name) return r.validate() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ConsensusSet) ValidateDelete() error { - consensussetlog.Info("validate delete", "name", r.Name) +func (r *ReplicatedStateMachine) ValidateDelete() error { + replicatedstatemachinelog.Info("validate delete", "name", r.Name) return r.validate() } -func (r *ConsensusSet) validate() error { +func (r *ReplicatedStateMachine) validate() error { var allErrs field.ErrorList // Leader is required @@ -104,7 +104,7 @@ func (r *ConsensusSet) validate() error { return apierrors.NewInvalid( schema.GroupKind{ Group: "workloads.kubeblocks.io/v1alpha1", - Kind: "ConsensusSet", + Kind: "ReplicatedStateMachine", }, r.Name, allErrs) } diff --git a/apis/workloads/v1alpha1/consensusset_webhook_test.go b/apis/workloads/v1alpha1/replicatedstatemachine_webhook_test.go similarity index 80% rename from apis/workloads/v1alpha1/consensusset_webhook_test.go rename to apis/workloads/v1alpha1/replicatedstatemachine_webhook_test.go index 
7538bea0002..d10ce36e2f6 100644 --- a/apis/workloads/v1alpha1/consensusset_webhook_test.go +++ b/apis/workloads/v1alpha1/replicatedstatemachine_webhook_test.go @@ -28,18 +28,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var _ = Describe("ConsensusSet Webhook", func() { +var _ = Describe("ReplicatedStateMachine Webhook", func() { Context("spec validation", func() { - const name = "test-consensus-set" - var csSet *ConsensusSet + const name = "test-replicated-state-machine" + var rsm *ReplicatedStateMachine BeforeEach(func() { - csSet = &ConsensusSet{ + rsm = &ReplicatedStateMachine{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: testCtx.DefaultNamespace, }, - Spec: ConsensusSetSpec{ + Spec: ReplicatedStateMachineSpec{ Replicas: 1, RoleObservation: RoleObservation{ ObservationActions: []Action{ @@ -64,48 +64,48 @@ var _ = Describe("ConsensusSet Webhook", func() { }) It("should return an error if no leader set", func() { - csSet.Spec.Roles = []ConsensusRole{ + rsm.Spec.Roles = []ReplicaRole{ { Name: "leader", IsLeader: false, AccessMode: ReadWriteMode, }, } - err := k8sClient.Create(ctx, csSet) + err := k8sClient.Create(ctx, rsm) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(ContainSubstring("leader is required")) }) It("should return an error if servicePort not provided", func() { - csSet.Spec.Roles = []ConsensusRole{ + rsm.Spec.Roles = []ReplicaRole{ { Name: "leader", IsLeader: true, AccessMode: ReadWriteMode, }, } - err := k8sClient.Create(ctx, csSet) + err := k8sClient.Create(ctx, rsm) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(ContainSubstring("servicePort must provide")) }) It("should succeed if spec is well defined", func() { - csSet.Spec.Roles = []ConsensusRole{ + rsm.Spec.Roles = []ReplicaRole{ { Name: "leader", IsLeader: true, AccessMode: ReadWriteMode, }, } - csSet.Spec.Service.Ports = []corev1.ServicePort{ + rsm.Spec.Service.Ports = []corev1.ServicePort{ { Name: "foo", Protocol: "tcp", Port: 12345, }, } - Expect(k8sClient.Create(ctx, csSet)).Should(Succeed()) - Expect(k8sClient.Delete(ctx, csSet)).Should(Succeed()) + Expect(k8sClient.Create(ctx, rsm)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, rsm)).Should(Succeed()) }) }) }) diff --git a/apis/workloads/v1alpha1/webhook_suite_test.go b/apis/workloads/v1alpha1/webhook_suite_test.go index e0f72fdc870..e153406dbb2 100644 --- a/apis/workloads/v1alpha1/webhook_suite_test.go +++ b/apis/workloads/v1alpha1/webhook_suite_test.go @@ -105,7 +105,7 @@ var _ = BeforeSuite(func() { }) Expect(err).NotTo(HaveOccurred()) - err = (&ConsensusSet{}).SetupWebhookWithManager(mgr) + err = (&ReplicatedStateMachine{}).SetupWebhookWithManager(mgr) Expect(err).NotTo(HaveOccurred()) testCtx = testutil.NewDefaultTestContext(ctx, k8sClient, testEnv) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index b6ce932f153..6c5995d2d4b 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -357,15 +357,13 @@ func main() { } } - if viper.GetBool("enable_consensus_set") { - if err = (&workloadscontrollers.ConsensusSetReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("consensus-set-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ConsensusSet") - os.Exit(1) - } + if err = (&workloadscontrollers.ReplicatedStateMachineReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("replicated-state-machine-controller"), + 
}).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ReplicatedStateMachine") + os.Exit(1) } // +kubebuilder:scaffold:builder @@ -448,8 +446,8 @@ func main() { os.Exit(1) } - if err = (&workloadsv1alpha1.ConsensusSet{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "ConsensusSet") + if err = (&workloadsv1alpha1.ReplicatedStateMachine{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ReplicatedStateMachine") os.Exit(1) } diff --git a/deploy/helm/crds/workloads.kubeblocks.io_consensussets.yaml b/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml similarity index 99% rename from deploy/helm/crds/workloads.kubeblocks.io_consensussets.yaml rename to config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml index 4bcdc8f360e..d313fb71b4f 100644 --- a/deploy/helm/crds/workloads.kubeblocks.io_consensussets.yaml +++ b/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml @@ -5,19 +5,19 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.0 creationTimestamp: null - name: consensussets.workloads.kubeblocks.io + name: replicatedstatemachines.workloads.kubeblocks.io spec: group: workloads.kubeblocks.io names: categories: - kubeblocks - all - kind: ConsensusSet - listKind: ConsensusSetList - plural: consensussets + kind: ReplicatedStateMachine + listKind: ReplicatedStateMachineList + plural: replicatedstatemachines shortNames: - - csset - singular: consensusset + - rsm + singular: replicatedstatemachine scope: Namespaced versions: - additionalPrinterColumns: @@ -39,7 +39,8 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: ConsensusSet is the Schema for the consensussets API + description: ReplicatedStateMachine is the Schema for the replicatedstatemachines + API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -54,13 +55,13 @@ spec: metadata: type: object spec: - description: ConsensusSetSpec defines the desired state of ConsensusSet + description: ReplicatedStateMachineSpec defines the desired state of ReplicatedStateMachine properties: credential: description: Credential used to connect to DB engine properties: password: - description: Password variable name will be KB_CONSENSUS_SET_PASSWORD + description: Password variable name will be KB_RSM_PASSWORD properties: value: description: 'Variable references $(VAR_NAME) are expanded @@ -158,7 +159,7 @@ spec: type: object type: object username: - description: Username variable name will be KB_CONSENSUS_SET_USERNAME + description: Username variable name will be KB_RSM_USERNAME properties: value: description: 'Variable references $(VAR_NAME) are expanded @@ -382,9 +383,9 @@ spec: single string of the role name defined in spec.Roles latest [BusyBox](https://busybox.net/) image will be used if Image not configured Environment variables can be used in Command: - - v_KB_CONSENSUS_SET_LAST_STDOUT stdout from last action, watch - ''v_'' prefixed - KB_CONSENSUS_SET_USERNAME username part of - credential - KB_CONSENSUS_SET_PASSWORD password part of credential' + - v_KB_RSM_LAST_STDOUT stdout from last action, watch ''v_'' + prefixed - KB_RSM_USERNAME username part of credential - KB_RSM_PASSWORD + password part of credential' items: properties: command: @@ -411,8 +412,9 @@ spec: successThreshold: default: 1 description: Minimum consecutive successes for the 
observation - to be considered successful after having failed. Defaults to - 1. Minimum value is 1. + to be considered successful after having failed. Minimum consecutive + successes for the observation to be considered successful after + having failed. Defaults to 1. Minimum value is 1. format: int32 minimum: 1 type: integer @@ -427,7 +429,7 @@ spec: - observationActions type: object roles: - description: Roles, a list of roles defined in this consensus system. + description: Roles, a list of roles defined in the system. items: properties: accessMode: @@ -8156,7 +8158,7 @@ spec: type: string volumeClaimTemplates: description: volumeClaimTemplates is a list of claims that pods are - allowed to reference. The ConsensusSet controller is responsible + allowed to reference. The ReplicatedStateMachine controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. @@ -8507,7 +8509,8 @@ spec: - template type: object status: - description: ConsensusSetStatus defines the observed state of ConsensusSet + description: ReplicatedStateMachineStatus defines the observed state of + ReplicatedStateMachine properties: availableReplicas: description: Total number of available pods (ready for at least minReadySeconds) diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 4d1ebf0c839..0049c47dcb7 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -15,7 +15,7 @@ resources: - bases/extensions.kubeblocks.io_addons.yaml - bases/apps.kubeblocks.io_componentresourceconstraints.yaml - bases/apps.kubeblocks.io_componentclassdefinitions.yaml -- bases/workloads.kubeblocks.io_consensussets.yaml +- bases/workloads.kubeblocks.io_replicatedstatemachines.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -37,7 +37,7 @@ patchesStrategicMerge: #- patches/webhook_in_addons.yaml #- patches/webhook_in_componentresourceconstraints.yaml #- patches/webhook_in_componentclassdefinitions.yaml -#- patches/webhook_in_consensussets.yaml +#- patches/webhook_in_replicatedstatemachines.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. @@ -58,7 +58,7 @@ patchesStrategicMerge: #- patches/cainjection_in_addonspecs.yaml #- patches/cainjection_in_componentresourceconstraints.yaml #- patches/cainjection_in_componentclassdefinitions.yaml -#- patches/cainjection_in_consensussets.yaml +#- patches/cainjection_in_replicatedstatemachines.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
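Taken together, the renamed types (replicatedstatemachine_types.go) and the retained admission checks (replicatedstatemachine_webhook.go) imply the minimal shape of a valid object. The following Go sketch is assembled from the webhook tests earlier in this patch, not copied from any file in it; the package alias `workloads`, the helper name `minimalRSM`, and all concrete values (namespace, names, image, port) are illustrative assumptions:

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
)

// minimalRSM sketches the smallest spec the renamed webhook accepts:
// one role flagged as leader, at least one service port, and a
// non-empty observationActions list; template is required by the CRD schema.
func minimalRSM() *workloads.ReplicatedStateMachine {
	return &workloads.ReplicatedStateMachine{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "rsm-sample"},
		Spec: workloads.ReplicatedStateMachineSpec{
			Replicas: 1,
			Roles: []workloads.ReplicaRole{
				// exactly the condition the webhook tests exercise: without
				// IsLeader=true on some role, validation fails with "leader is required".
				{Name: "leader", IsLeader: true, AccessMode: workloads.ReadWriteMode},
			},
			Service: corev1.ServiceSpec{
				// an empty Ports list fails validation with "servicePort must provide".
				Ports: []corev1.ServicePort{{Name: "svc", Protocol: "TCP", Port: 12345}},
			},
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "bar", Image: "foo"}},
				},
			},
			RoleObservation: workloads.RoleObservation{
				ObservationActions: []workloads.Action{
					{Image: "busybox", Command: []string{"echo", "leader"}},
				},
			},
		},
	}
}

The two error strings in the comments are the ones asserted by replicatedstatemachine_webhook_test.go above.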
diff --git a/config/crd/patches/cainjection_in_workloads_consensussets.yaml b/config/crd/patches/cainjection_in_workloads_replicatedstatemachines.yaml similarity index 81% rename from config/crd/patches/cainjection_in_workloads_consensussets.yaml rename to config/crd/patches/cainjection_in_workloads_replicatedstatemachines.yaml index f505ec87df6..28731ab2563 100644 --- a/config/crd/patches/cainjection_in_workloads_consensussets.yaml +++ b/config/crd/patches/cainjection_in_workloads_replicatedstatemachines.yaml @@ -4,4 +4,4 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: consensussets.workloads.kubeblocks.io + name: replicatedstatemachines.workloads.kubeblocks.io diff --git a/config/crd/patches/webhook_in_workloads_consensussets.yaml b/config/crd/patches/webhook_in_workloads_replicatedstatemachines.yaml similarity index 86% rename from config/crd/patches/webhook_in_workloads_consensussets.yaml rename to config/crd/patches/webhook_in_workloads_replicatedstatemachines.yaml index c27e9378e78..e24f648c4ee 100644 --- a/config/crd/patches/webhook_in_workloads_consensussets.yaml +++ b/config/crd/patches/webhook_in_workloads_replicatedstatemachines.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: consensussets.workloads.kubeblocks.io + name: replicatedstatemachines.workloads.kubeblocks.io spec: conversion: strategy: Webhook diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 5352a83eaa8..b215eca6b25 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -682,7 +682,7 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets + - replicatedstatemachines verbs: - create - delete @@ -694,13 +694,13 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets/finalizers + - replicatedstatemachines/finalizers verbs: - update - apiGroups: - workloads.kubeblocks.io resources: - - consensussets/status + - replicatedstatemachines/status verbs: - get - patch diff --git a/config/rbac/workloads_consensusset_editor_role.yaml b/config/rbac/workloads_replicatedstatemachine_editor_role.yaml similarity index 68% rename from config/rbac/workloads_consensusset_editor_role.yaml rename to config/rbac/workloads_replicatedstatemachine_editor_role.yaml index 1aa4b1efb14..69c5a5665e9 100644 --- a/config/rbac/workloads_consensusset_editor_role.yaml +++ b/config/rbac/workloads_replicatedstatemachine_editor_role.yaml @@ -1,20 +1,20 @@ -# permissions for end users to edit consensussets. +# permissions for end users to edit replicatedstatemachines. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: consensusset-editor-role + app.kubernetes.io/instance: replicatedstatemachine-editor-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubeblocks app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize - name: consensusset-editor-role + name: replicatedstatemachine-editor-role rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets + - replicatedstatemachines verbs: - create - delete @@ -26,6 +26,6 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets/status + - replicatedstatemachines/status verbs: - get diff --git a/config/rbac/workloads_consensusset_viewer_role.yaml b/config/rbac/workloads_replicatedstatemachine_viewer_role.yaml similarity index 65% rename from config/rbac/workloads_consensusset_viewer_role.yaml rename to config/rbac/workloads_replicatedstatemachine_viewer_role.yaml index 13bf9399fb8..087fa590ba2 100644 --- a/config/rbac/workloads_consensusset_viewer_role.yaml +++ b/config/rbac/workloads_replicatedstatemachine_viewer_role.yaml @@ -1,20 +1,20 @@ -# permissions for end users to view consensussets. +# permissions for end users to view replicatedstatemachines. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: consensusset-viewer-role + app.kubernetes.io/instance: replicatedstatemachine-viewer-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubeblocks app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize - name: consensusset-viewer-role + name: replicatedstatemachine-viewer-role rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets + - replicatedstatemachines verbs: - get - list @@ -22,6 +22,6 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets/status + - replicatedstatemachines/status verbs: - get diff --git a/config/samples/workloads_v1alpha1_consensusset.yaml b/config/samples/workloads_v1alpha1_replicatedstatemachine.yaml similarity index 56% rename from config/samples/workloads_v1alpha1_consensusset.yaml rename to config/samples/workloads_v1alpha1_replicatedstatemachine.yaml index d0bdb22f305..b320af79628 100644 --- a/config/samples/workloads_v1alpha1_consensusset.yaml +++ b/config/samples/workloads_v1alpha1_replicatedstatemachine.yaml @@ -1,12 +1,12 @@ apiVersion: workloads.kubeblocks.io/v1alpha1 -kind: ConsensusSet +kind: ReplicatedStateMachine metadata: labels: - app.kubernetes.io/name: consensusset - app.kubernetes.io/instance: consensusset-sample + app.kubernetes.io/name: replicatedstatemachine + app.kubernetes.io/instance: replicatedstatemachine-sample app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize app.kubernetes.io/created-by: kubeblocks - name: consensusset-sample + name: replicatedstatemachine-sample spec: # TODO(user): Add fields here diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index cd39a2e115c..764ccb5cff0 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -31,9 +31,9 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-workloads-kubeblocks-io-v1alpha1-consensusset + path: /mutate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine failurePolicy: Fail - name: mconsensusset.kb.io + name: mreplicatedstatemachine.kb.io rules: -
apiGroups: - workloads.kubeblocks.io @@ -43,7 +43,7 @@ webhooks: - CREATE - UPDATE resources: - - consensussets + - replicatedstatemachines sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 @@ -138,9 +138,9 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-workloads-kubeblocks-io-v1alpha1-consensusset + path: /validate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine failurePolicy: Fail - name: vconsensusset.kb.io + name: vreplicatedstatemachine.kb.io rules: - apiGroups: - workloads.kubeblocks.io @@ -150,5 +150,5 @@ webhooks: - CREATE - UPDATE resources: - - consensussets + - replicatedstatemachines sideEffects: None diff --git a/controllers/k8score/event_controller.go b/controllers/k8score/event_controller.go index 1acd27a970e..f544bb0e0e0 100644 --- a/controllers/k8score/event_controller.go +++ b/controllers/k8score/event_controller.go @@ -38,7 +38,7 @@ import ( "github.com/apecloud/kubeblocks/controllers/apps/components/replication" componentutil "github.com/apecloud/kubeblocks/controllers/apps/components/util" "github.com/apecloud/kubeblocks/internal/constant" - "github.com/apecloud/kubeblocks/internal/controller/consensusset" + "github.com/apecloud/kubeblocks/internal/controller/rsm" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" probeutil "github.com/apecloud/kubeblocks/internal/sqlchannel/util" ) @@ -76,7 +76,7 @@ var _ EventHandler = &RoleChangeEventHandler{} func init() { EventHandlerMap["role-change-handler"] = &RoleChangeEventHandler{} - EventHandlerMap["consensus-set-event-handler"] = &consensusset.PodRoleEventHandler{} + EventHandlerMap["rsm-event-handler"] = &rsm.PodRoleEventHandler{} } // Reconcile is part of the main kubernetes reconciliation loop which aims to diff --git a/controllers/k8score/event_controller_test.go b/controllers/k8score/event_controller_test.go index e12537cf01d..b6ec986689e 100644 --- a/controllers/k8score/event_controller_test.go +++ b/controllers/k8score/event_controller_test.go @@ -20,9 +20,8 @@ along with this program. If not, see . package k8score import ( - "bytes" - "context" - "text/template" + "fmt" + "strings" "time" . "github.com/onsi/ginkgo/v2" @@ -30,27 +29,21 @@ import ( "github.com/sethvargo/go-password/password" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/builder" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" "github.com/apecloud/kubeblocks/internal/generics" probeutil "github.com/apecloud/kubeblocks/internal/sqlchannel/util" testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" ) -type roleEventValue struct { - PodName string - EventSeq string - Role string -} - var _ = Describe("Event Controller", func() { - var ctx = context.Background() - cleanEnv := func() { // must wait till resources deleted and no longer existed before the testcases start, // otherwise if later it needs to create some new resource objects with the same name, @@ -58,6 +51,8 @@ var _ = Describe("Event Controller", func() { // create the new objects. 
By("clean resources") + testapps.ClearClusterResources(&testCtx) + // delete rest mocked objects inNS := client.InNamespace(testCtx.DefaultNamespace) ml := client.HasLabels{testCtx.TestObjLabelKey} @@ -66,27 +61,76 @@ var _ = Describe("Event Controller", func() { testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml) } + createRoleChangedEvent := func(podName, role string, podUid types.UID) *corev1.Event { + seq, _ := password.Generate(16, 16, 0, true, true) + objectRef := corev1.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: testCtx.DefaultNamespace, + Name: podName, + UID: podUid, + } + eventName := strings.Join([]string{podName, seq}, ".") + return builder.NewEventBuilder(testCtx.DefaultNamespace, eventName). + SetInvolvedObject(objectRef). + SetMessage(fmt.Sprintf("{\"event\":\"roleChanged\",\"originalRole\":\"secondary\",\"role\":\"%s\"}", role)). + SetReason(string(probeutil.CheckRoleOperation)). + SetType(corev1.EventTypeNormal). + GetObject() + } + + createInvolvedPod := func(name, clusterName, componentName string) *corev1.Pod { + return builder.NewPodBuilder(testCtx.DefaultNamespace, name). + AddLabels(constant.AppInstanceLabelKey, clusterName). + AddLabels(constant.KBAppComponentLabelKey, componentName). + SetContainers([]corev1.Container{ + { + Image: "foo", + Name: "bar", + }, + }). + GetObject() + } + BeforeEach(cleanEnv) AfterEach(cleanEnv) Context("When receiving role changed event", func() { It("should handle it properly", func() { + By("create cluster & clusterDef") + clusterDefName := "foo" + consensusCompName := "consensus" + consensusCompDefName := "consensus" + clusterDefObj := testapps.NewClusterDefFactory(clusterDefName). + AddComponentDef(testapps.ConsensusMySQLComponent, consensusCompDefName). + Create(&testCtx).GetObject() + clusterObj := testapps.NewClusterFactory(testCtx.DefaultNamespace, "", + clusterDefObj.Name, "").WithRandomName(). + AddComponent(consensusCompName, consensusCompDefName). 
+ Create(&testCtx).GetObject() + Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(clusterObj), &appsv1alpha1.Cluster{}, true)).Should(Succeed()) + By("create involved pod") + var uid types.UID podName := "foo" - pod := createInvolvedPod(podName) - Expect(testCtx.CreateObj(ctx, &pod)).Should(Succeed()) + pod := createInvolvedPod(podName, clusterObj.Name, consensusCompName) + Expect(testCtx.CreateObj(ctx, pod)).Should(Succeed()) Eventually(func() error { p := &corev1.Pod{} + defer func() { + uid = p.UID + }() return k8sClient.Get(ctx, types.NamespacedName{ Namespace: pod.Namespace, Name: pod.Name, }, p) }).Should(Succeed()) + Expect(uid).ShouldNot(BeNil()) By("send role changed event") - sndEvent, err := createRoleChangedEvent(podName, "leader") - Expect(err).Should(Succeed()) + role := "leader" + sndEvent := createRoleChangedEvent(podName, role, uid) Expect(testCtx.CreateObj(ctx, sndEvent)).Should(Succeed()) Eventually(func() string { event := &corev1.Event{} @@ -99,82 +143,51 @@ var _ = Describe("Event Controller", func() { return event.InvolvedObject.Name }).Should(Equal(sndEvent.InvolvedObject.Name)) - By("Test parse event message") + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(pod), func(g Gomega, p *corev1.Pod) { + g.Expect(p).ShouldNot(BeNil()) + g.Expect(p.Labels).ShouldNot(BeNil()) + g.Expect(p.Labels[constant.RoleLabelKey]).Should(Equal(role)) + })).Should(Succeed()) + + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(sndEvent), func(g Gomega, e *corev1.Event) { + g.Expect(e).ShouldNot(BeNil()) + g.Expect(e.Annotations).ShouldNot(BeNil()) + g.Expect(e.Annotations[roleChangedAnnotKey]).Should(Equal(trueStr)) + })).Should(Succeed()) + + By("check whether the duration and number of events reach the threshold") + Expect(IsOvertimeEvent(sndEvent, 5*time.Second)).Should(BeFalse()) + }) + }) + + Context("ParseProbeEventMessage function", func() { + It("should work well", func() { reqCtx := intctrlutil.RequestCtx{ Ctx: testCtx.Ctx, Log: log.FromContext(ctx).WithValues("event", testCtx.DefaultNamespace), } - eventMessage := ParseProbeEventMessage(reqCtx, sndEvent) - Expect(eventMessage).ShouldNot(BeNil()) + event := createRoleChangedEvent("foo", "", "bar") + event.Message = "not-a-role-message" + eventMessage := ParseProbeEventMessage(reqCtx, event) + Expect(eventMessage).Should(BeNil()) + }) + }) - By("check whether the duration and number of events reach the threshold") - IsOvertimeEvent(sndEvent, 5*time.Second) + Context("IsOvertimeEvent function", func() { + It("should work well", func() { + event := createRoleChangedEvent("foo", "", "bar") + timeout := 50 * time.Millisecond + event.FirstTimestamp = metav1.NewTime(time.Now()) + event.LastTimestamp = metav1.NewTime(time.Now()) + Expect(IsOvertimeEvent(event, timeout)).Should(BeFalse()) + event.LastTimestamp = metav1.NewTime(event.LastTimestamp.Time.Add(2 * timeout)) + Expect(IsOvertimeEvent(event, timeout)).Should(BeTrue()) + + event.EventTime = metav1.NewMicroTime(time.Now()) + event.Series = &corev1.EventSeries{LastObservedTime: metav1.NewMicroTime(time.Now())} + Expect(IsOvertimeEvent(event, timeout)).Should(BeFalse()) + event.Series = &corev1.EventSeries{LastObservedTime: metav1.NewMicroTime(time.Now().Add(2 * timeout))} + Expect(IsOvertimeEvent(event, timeout)).Should(BeTrue()) }) }) }) - -func createRoleChangedEvent(podName, role string) (*corev1.Event, error) { - eventTmpl := ` -apiVersion: v1 -kind: Event -metadata: - name: {{ .PodName }}.{{ .EventSeq }} - namespace: 
default -involvedObject: - apiVersion: v1 - fieldPath: spec.containers{kbprobe-rolechangedcheck} - kind: Pod - name: {{ .PodName }} - namespace: default -message: "{\"event\":\"roleChanged\",\"originalRole\":\"secondary\",\"role\":\"{{ .Role }}\"}" -reason: RoleChanged -type: Normal -` - - seq, err := password.Generate(16, 16, 0, true, true) - if err != nil { - return nil, err - } - roleValue := roleEventValue{ - PodName: podName, - EventSeq: seq, - Role: role, - } - tmpl, err := template.New("event-tmpl").Parse(eventTmpl) - if err != nil { - return nil, err - } - buf := new(bytes.Buffer) - err = tmpl.Execute(buf, roleValue) - if err != nil { - return nil, err - } - - event := &corev1.Event{} - _, _, err = scheme.Codecs.UniversalDeserializer().Decode(buf.Bytes(), nil, event) - if err != nil { - return nil, err - } - event.Reason = string(probeutil.CheckRoleOperation) - - return event, nil -} - -func createInvolvedPod(name string) corev1.Pod { - podYaml := ` -apiVersion: v1 -kind: Pod -metadata: - name: my-name - namespace: default -spec: - containers: - - image: docker.io/apecloud/apecloud-mysql-server:latest - name: mysql -` - pod := corev1.Pod{} - Expect(yaml.Unmarshal([]byte(podYaml), &pod)).Should(Succeed()) - pod.Name = name - - return pod -} diff --git a/controllers/k8score/suite_test.go b/controllers/k8score/suite_test.go index 954c4060e5e..443c28aae0a 100644 --- a/controllers/k8score/suite_test.go +++ b/controllers/k8score/suite_test.go @@ -40,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" "github.com/apecloud/kubeblocks/internal/testutil" ) @@ -91,6 +92,9 @@ var _ = BeforeSuite(func() { err = storagev1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = appsv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) diff --git a/controllers/workloads/consensusset_controller.go b/controllers/workloads/replicatedstatemachine_controller.go similarity index 72% rename from controllers/workloads/consensusset_controller.go rename to controllers/workloads/replicatedstatemachine_controller.go index 5e7a56af786..d2d5153e17f 100644 --- a/controllers/workloads/consensusset_controller.go +++ b/controllers/workloads/replicatedstatemachine_controller.go @@ -33,40 +33,40 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "github.com/apecloud/kubeblocks/internal/controller/consensusset" "github.com/apecloud/kubeblocks/internal/controller/model" + "github.com/apecloud/kubeblocks/internal/controller/rsm" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) -// ConsensusSetReconciler reconciles a ConsensusSet object -type ConsensusSetReconciler struct { +// ReplicatedStateMachineReconciler reconciles a ReplicatedStateMachine object +type ReplicatedStateMachineReconciler struct { client.Client Scheme *runtime.Scheme Recorder record.EventRecorder } -//+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=consensussets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=consensussets/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=consensussets/finalizers,verbs=update 
+//+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the ConsensusSet object against the actual cluster state, and then +// the ReplicatedStateMachine object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *ConsensusSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *ReplicatedStateMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqCtx := intctrlutil.RequestCtx{ Ctx: ctx, Req: req, - Log: log.FromContext(ctx).WithValues("ConsensusSet", req.NamespacedName), + Log: log.FromContext(ctx).WithValues("ReplicatedStateMachine", req.NamespacedName), Recorder: r.Recorder, } - reqCtx.Log.V(1).Info("reconcile", "ConsensusSet", req.NamespacedName) + reqCtx.Log.V(1).Info("reconcile", "ReplicatedStateMachine", req.NamespacedName) requeueError := func(err error) (ctrl.Result, error) { if re, ok := err.(model.RequeueError); ok { @@ -75,9 +75,9 @@ func (r *ConsensusSetReconciler) Reconcile(ctx context.Context, req ctrl.Request return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } - // the consensus set reconciliation loop is a 3-stage model: plan Init, plan Build and plan Execute + // the RSM reconciliation loop is a 3-stage model: plan Init, plan Build and plan Execute // Init stage - planBuilder := consensusset.NewCSSetPlanBuilder(reqCtx, r.Client, req) + planBuilder := rsm.NewRSMPlanBuilder(reqCtx, r.Client, req) if err := planBuilder.Init(); err != nil { return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } @@ -98,18 +98,18 @@ func (r *ConsensusSetReconciler) Reconcile(ctx context.Context, req ctrl.Request plan, err := planBuilder. AddTransformer( // fix meta - &consensusset.FixMetaTransformer{}, + &rsm.FixMetaTransformer{}, // handle deletion // handle cluster deletion first - &consensusset.CSSetDeletionTransformer{}, + &rsm.ObjectDeletionTransformer{}, // handle secondary objects generation - &consensusset.ObjectGenerationTransformer{}, + &rsm.ObjectGenerationTransformer{}, // handle status - &consensusset.CSSetStatusTransformer{}, + &rsm.ObjectStatusTransformer{}, // handle UpdateStrategy - &consensusset.UpdateStrategyTransformer{}, + &rsm.UpdateStrategyTransformer{}, // handle member reconfiguration - &consensusset.MemberReconfigurationTransformer{}, + &rsm.MemberReconfigurationTransformer{}, // always safe to put your transformer below ). Build() @@ -131,16 +131,17 @@ func (r *ConsensusSetReconciler) Reconcile(ctx context.Context, req ctrl.Request } // SetupWithManager sets up the controller with the Manager. -func (r *ConsensusSetReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *ReplicatedStateMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr).
- For(&workloads.ConsensusSet{}). + For(&workloads.ReplicatedStateMachine{}). Owns(&appsv1.StatefulSet{}). Owns(&batchv1.Job{}). Watches(&source.Kind{Type: &corev1.Pod{}}, - &consensusset.EnqueueRequestForAncestor{ + &rsm.EnqueueRequestForAncestor{ Client: r.Client, - OwnerType: &workloads.ConsensusSet{}, + OwnerType: &workloads.ReplicatedStateMachine{}, UpToLevel: 2, + InTypes: []runtime.Object{&appsv1.StatefulSet{}}, }). Complete(r) } diff --git a/controllers/workloads/consensusset_controller_test.go b/controllers/workloads/replicatedstatemachine_controller_test.go similarity index 83% rename from controllers/workloads/consensusset_controller_test.go rename to controllers/workloads/replicatedstatemachine_controller_test.go index 5e545cff439..a354622b126 100644 --- a/controllers/workloads/consensusset_controller_test.go +++ b/controllers/workloads/replicatedstatemachine_controller_test.go @@ -30,10 +30,10 @@ import ( testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" ) -var _ = Describe("ConsensusSet Controller", func() { +var _ = Describe("ReplicatedStateMachine Controller", func() { Context("reconciliation", func() { It("should reconcile well", func() { - name := "text-consensus-set" + name := "test-stateful-replica-set" port := int32(12345) service := corev1.ServiceSpec{ Ports: []corev1.ServicePort{ @@ -64,19 +64,19 @@ var _ = Describe("ConsensusSet Controller", func() { Image: "foo", Command: []string{"bar"}, } - csSet := builder.NewConsensusSetBuilder(testCtx.DefaultNamespace, name). + rsm := builder.NewReplicatedStateMachineBuilder(testCtx.DefaultNamespace, name). SetService(service). SetTemplate(template). AddObservationAction(action). GetObject() - Expect(k8sClient.Create(ctx, csSet)).Should(Succeed()) - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(csSet), - func(g Gomega, set *workloads.ConsensusSet) { + Expect(k8sClient.Create(ctx, rsm)).Should(Succeed()) + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(rsm), + func(g Gomega, set *workloads.ReplicatedStateMachine) { g.Expect(set.Status.ObservedGeneration).Should(BeEquivalentTo(1)) }), ).Should(Succeed()) - Expect(k8sClient.Delete(ctx, csSet)).Should(Succeed()) - Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(csSet), &workloads.ConsensusSet{}, false)). + Expect(k8sClient.Delete(ctx, rsm)).Should(Succeed()) + Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(rsm), &workloads.ReplicatedStateMachine{}, false)). 
Should(Succeed()) }) }) diff --git a/controllers/workloads/suite_test.go b/controllers/workloads/suite_test.go index 97b2f515584..82956721588 100644 --- a/controllers/workloads/suite_test.go +++ b/controllers/workloads/suite_test.go @@ -94,7 +94,7 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) recorder := k8sManager.GetEventRecorderFor("consensus-set-controller") - err = (&ConsensusSetReconciler{ + err = (&ReplicatedStateMachineReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), Recorder: recorder, diff --git a/deploy/helm/config/rbac/role.yaml b/deploy/helm/config/rbac/role.yaml index 5352a83eaa8..b215eca6b25 100644 --- a/deploy/helm/config/rbac/role.yaml +++ b/deploy/helm/config/rbac/role.yaml @@ -682,7 +682,7 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets + - replicatedstatemachines verbs: - create - delete @@ -694,13 +694,13 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - consensussets/finalizers + - replicatedstatemachines/finalizers verbs: - update - apiGroups: - workloads.kubeblocks.io resources: - - consensussets/status + - replicatedstatemachines/status verbs: - get - patch diff --git a/config/crd/bases/workloads.kubeblocks.io_consensussets.yaml b/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml similarity index 99% rename from config/crd/bases/workloads.kubeblocks.io_consensussets.yaml rename to deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml index 4bcdc8f360e..d313fb71b4f 100644 --- a/config/crd/bases/workloads.kubeblocks.io_consensussets.yaml +++ b/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml @@ -5,19 +5,19 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.0 creationTimestamp: null - name: consensussets.workloads.kubeblocks.io + name: replicatedstatemachines.workloads.kubeblocks.io spec: group: workloads.kubeblocks.io names: categories: - kubeblocks - all - kind: ConsensusSet - listKind: ConsensusSetList - plural: consensussets + kind: ReplicatedStateMachine + listKind: ReplicatedStateMachineList + plural: replicatedstatemachines shortNames: - - csset - singular: consensusset + - rsm + singular: replicatedstatemachine scope: Namespaced versions: - additionalPrinterColumns: @@ -39,7 +39,8 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: ConsensusSet is the Schema for the consensussets API + description: ReplicatedStateMachine is the Schema for the replicatedstatemachines + API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -54,13 +55,13 @@ spec: metadata: type: object spec: - description: ConsensusSetSpec defines the desired state of ConsensusSet + description: ReplicatedStateMachineSpec defines the desired state of ReplicatedStateMachine properties: credential: description: Credential used to connect to DB engine properties: password: - description: Password variable name will be KB_CONSENSUS_SET_PASSWORD + description: Password variable name will be KB_RSM_PASSWORD properties: value: description: 'Variable references $(VAR_NAME) are expanded @@ -158,7 +159,7 @@ spec: type: object type: object username: - description: Username variable name will be KB_CONSENSUS_SET_USERNAME + description: Username variable name will be KB_RSM_USERNAME properties: value: description: 'Variable references $(VAR_NAME) are expanded @@ -382,9 +383,9 @@ spec: single string of the role name defined in spec.Roles latest [BusyBox](https://busybox.net/) image 
will be used if Image not configured Environment variables can be used in Command: - - v_KB_CONSENSUS_SET_LAST_STDOUT stdout from last action, watch - ''v_'' prefixed - KB_CONSENSUS_SET_USERNAME username part of - credential - KB_CONSENSUS_SET_PASSWORD password part of credential' + - v_KB_RSM_LAST_STDOUT stdout from last action, watch ''v_'' + prefixed - KB_RSM_USERNAME username part of credential - KB_RSM_PASSWORD + password part of credential' items: properties: command: @@ -411,8 +412,9 @@ spec: successThreshold: default: 1 description: Minimum consecutive successes for the observation - to be considered successful after having failed. Defaults to - 1. Minimum value is 1. + to be considered successful after having failed. Minimum consecutive + successes for the observation to be considered successful after + having failed. Defaults to 1. Minimum value is 1. format: int32 minimum: 1 type: integer @@ -427,7 +429,7 @@ spec: - observationActions type: object roles: - description: Roles, a list of roles defined in this consensus system. + description: Roles, a list of roles defined in the system. items: properties: accessMode: @@ -8156,7 +8158,7 @@ spec: type: string volumeClaimTemplates: description: volumeClaimTemplates is a list of claims that pods are - allowed to reference. The ConsensusSet controller is responsible + allowed to reference. The ReplicatedStateMachine controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. @@ -8507,7 +8509,8 @@ spec: - template type: object status: - description: ConsensusSetStatus defines the observed state of ConsensusSet + description: ReplicatedStateMachineStatus defines the observed state of + ReplicatedStateMachine properties: availableReplicas: description: Total number of available pods (ready for at least minReadySeconds) diff --git a/deploy/helm/templates/admission/webhookconfiguration.yaml b/deploy/helm/templates/admission/webhookconfiguration.yaml index fa71c869e5f..b5f0881c15b 100644 --- a/deploy/helm/templates/admission/webhookconfiguration.yaml +++ b/deploy/helm/templates/admission/webhookconfiguration.yaml @@ -60,6 +60,30 @@ webhooks: resources: - clusterdefinitions sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: {{ include "kubeblocks.svcName" . }} + namespace: {{ .Release.Namespace }} + path: /mutate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine + port: {{ .Values.service.port }} + {{- if .Values.admissionWebhooks.createSelfSignedCert }} + caBundle: {{ $ca.Cert | b64enc }} + {{- end }} + failurePolicy: Fail + name: mreplicatedstatemachine.kb.io + rules: + - apiGroups: + - workloads.kubeblocks.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - replicatedstatemachines + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration @@ -164,4 +188,28 @@ webhooks: resources: - opsrequests sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: {{ include "kubeblocks.svcName" . 
}} + namespace: {{ .Release.Namespace }} + path: /validate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine + port: {{ .Values.service.port }} + {{- if .Values.admissionWebhooks.createSelfSignedCert }} + caBundle: {{ $ca.Cert | b64enc }} + {{- end }} + failurePolicy: Fail + name: vreplicatedstatemachine.kb.io + rules: + - apiGroups: + - workloads.kubeblocks.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - replicatedstatemachines + sideEffects: None {{- end }} diff --git a/deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml new file mode 100644 index 00000000000..69c5a5665e9 --- /dev/null +++ b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit replicatedstatemachines. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: replicatedstatemachine-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeblocks + app.kubernetes.io/part-of: kubeblocks + app.kubernetes.io/managed-by: kustomize + name: replicatedstatemachine-editor-role +rules: +- apiGroups: + - workloads.kubeblocks.io + resources: + - replicatedstatemachines + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - workloads.kubeblocks.io + resources: + - replicatedstatemachines/status + verbs: + - get diff --git a/deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml new file mode 100644 index 00000000000..087fa590ba2 --- /dev/null +++ b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view replicatedstatemachines. 
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: clusterrole
+    app.kubernetes.io/instance: replicatedstatemachine-viewer-role
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: kubeblocks
+    app.kubernetes.io/part-of: kubeblocks
+    app.kubernetes.io/managed-by: kustomize
+  name: replicatedstatemachine-viewer-role
+rules:
+- apiGroups:
+  - workloads.kubeblocks.io
+  resources:
+  - replicatedstatemachines
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - workloads.kubeblocks.io
+  resources:
+  - replicatedstatemachines/status
+  verbs:
+  - get
diff --git a/internal/controller/builder/builder_base.go b/internal/controller/builder/builder_base.go
index 95e12fdc63d..2767c6dd3a0 100644
--- a/internal/controller/builder/builder_base.go
+++ b/internal/controller/builder/builder_base.go
@@ -24,6 +24,7 @@ import (
 
 	appsv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	intctrlutil "github.com/apecloud/kubeblocks/internal/generics"
@@ -59,6 +60,11 @@ func (builder *BaseBuilder[T, PT, B]) SetName(name string) *B {
 	return builder.concreteBuilder
 }
 
+func (builder *BaseBuilder[T, PT, B]) SetUID(uid types.UID) *B {
+	builder.object.SetUID(uid)
+	return builder.concreteBuilder
+}
+
 func (builder *BaseBuilder[T, PT, B]) AddLabels(keysAndValues ...string) *B {
 	builder.AddLabelsInMap(WithMap(keysAndValues...))
 	return builder.concreteBuilder
diff --git a/internal/controller/builder/builder_base_test.go b/internal/controller/builder/builder_base_test.go
index 26a6a42bac4..8492bbcc2e7 100644
--- a/internal/controller/builder/builder_base_test.go
+++ b/internal/controller/builder/builder_base_test.go
@@ -22,6 +22,7 @@ package builder
 import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/types"
 
 	appsv1 "k8s.io/api/apps/v1"
 )
@@ -31,6 +32,7 @@ var _ = Describe("base builder", func() {
 		const (
 			name                   = "foo"
 			ns                     = "default"
+			uid                    = types.UID("foo-bar")
 			labelKey1, labelValue1 = "foo-1", "bar-1"
 			labelKey2, labelValue2 = "foo-2", "bar-2"
 			labelKey3, labelValue3 = "foo-3", "bar-3"
@@ -42,11 +44,12 @@ var _ = Describe("base builder", func() {
 		annotations := map[string]string{annotationKey3: annotationValue3}
 		controllerRevision := "wer-23e23-sedfwe--34r23"
 		finalizer := "foo-bar"
-		owner := NewConsensusSetBuilder(ns, name).GetObject()
+		owner := NewReplicatedStateMachineBuilder(ns, name).GetObject()
 		owner.UID = "sdfwsedqw-swed-sdswe"
 		ownerAPIVersion := "workloads.kubeblocks.io/v1alpha1"
-		ownerKind := "ConsensusSet"
+		ownerKind := "ReplicatedStateMachine"
 		obj := NewConfigMapBuilder(ns, name).
+			SetUID(uid).
 			AddLabels(labelKey1, labelValue1, labelKey2, labelValue2).
 			AddLabelsInMap(labels).
 			AddAnnotations(annotationKey1, annotationValue1, annotationKey2, annotationValue2).
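A note on the generics these builder changes rely on: BaseBuilder keeps a back-pointer to the concrete builder, so a shared setter such as the new SetUID can return the concrete builder type and keep the fluent chain intact. A minimal, self-contained sketch of the pattern (simplified names, not the project's actual definitions):

package builder

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// baseBuilder sketches the idea behind BaseBuilder: T is the concrete object
// type, PT constrains *T to client.Object, and B is the concrete builder
// type, so shared setters can return *B and keep the fluent chain type-safe.
type baseBuilder[T any, PT interface {
	*T
	client.Object
}, B any] struct {
	object          PT
	concreteBuilder *B // back-pointer that makes chaining work
}

// SetUID compiles for every object type because client.Object embeds
// metav1.Object, which provides SetUID.
func (b *baseBuilder[T, PT, B]) SetUID(uid types.UID) *B {
	b.object.SetUID(uid)
	return b.concreteBuilder
}

// A concrete builder only has to embed the base with itself as B.
type configMapBuilder struct {
	baseBuilder[corev1.ConfigMap, *corev1.ConfigMap, configMapBuilder]
}

func newConfigMapBuilder(ns, name string) *configMapBuilder {
	b := &configMapBuilder{}
	b.object = &corev1.ConfigMap{}
	b.object.SetNamespace(ns)
	b.object.SetName(name)
	b.concreteBuilder = b
	return b
}

// usage: cm := newConfigMapBuilder("default", "foo").SetUID("some-uid").object

This is Go's flavor of CRTP: each concrete builder passes itself as the B type parameter, so every setter inherited from the base stays chainable without per-type wrapper methods.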
@@ -58,6 +61,7 @@ var _ = Describe("base builder", func() { Expect(obj.Name).Should(Equal(name)) Expect(obj.Namespace).Should(Equal(ns)) + Expect(obj.UID).Should(Equal(uid)) Expect(len(obj.Labels)).Should(Equal(4)) Expect(obj.Labels[labelKey1]).Should(Equal(labelValue1)) Expect(obj.Labels[labelKey2]).Should(Equal(labelValue2)) diff --git a/internal/controller/builder/builder_event.go b/internal/controller/builder/builder_event.go new file mode 100644 index 00000000000..e6c3327bb16 --- /dev/null +++ b/internal/controller/builder/builder_event.go @@ -0,0 +1,54 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + corev1 "k8s.io/api/core/v1" +) + +type EventBuilder struct { + BaseBuilder[corev1.Event, *corev1.Event, EventBuilder] +} + +func NewEventBuilder(namespace, name string) *EventBuilder { + builder := &EventBuilder{} + builder.init(namespace, name, &corev1.Event{}, builder) + return builder +} + +func (builder *EventBuilder) SetInvolvedObject(objectRef corev1.ObjectReference) *EventBuilder { + builder.get().InvolvedObject = objectRef + return builder +} + +func (builder *EventBuilder) SetMessage(message string) *EventBuilder { + builder.get().Message = message + return builder +} + +func (builder *EventBuilder) SetReason(reason string) *EventBuilder { + builder.get().Reason = reason + return builder +} + +func (builder *EventBuilder) SetType(tp string) *EventBuilder { + builder.get().Type = tp + return builder +} diff --git a/internal/controller/builder/builder_event_test.go b/internal/controller/builder/builder_event_test.go new file mode 100644 index 00000000000..33aad067b6a --- /dev/null +++ b/internal/controller/builder/builder_event_test.go @@ -0,0 +1,61 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("event builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + uid = types.UID("bar") + ) + objectRef := corev1.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: ns, + Name: name, + UID: uid, + } + message := "foo-bar" + reason := "reason" + tp := corev1.EventTypeNormal + event := NewEventBuilder(ns, "foo"). + SetInvolvedObject(objectRef). + SetMessage(message). + SetReason(reason). + SetType(tp). + GetObject() + + Expect(event.Name).Should(Equal(name)) + Expect(event.Namespace).Should(Equal(ns)) + Expect(event.InvolvedObject).Should(Equal(objectRef)) + Expect(event.Message).Should(Equal(message)) + Expect(event.Reason).Should(Equal(reason)) + Expect(event.Type).Should(Equal(tp)) + }) +}) diff --git a/internal/controller/consensusset/suite_test.go b/internal/controller/builder/builder_pvc.go similarity index 60% rename from internal/controller/consensusset/suite_test.go rename to internal/controller/builder/builder_pvc.go index a8420312a17..0f2f6cb79ff 100644 --- a/internal/controller/consensusset/suite_test.go +++ b/internal/controller/builder/builder_pvc.go @@ -17,32 +17,21 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package builder -import ( - "testing" +import corev1 "k8s.io/api/core/v1" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -func init() { +type PVCBuilder struct { + BaseBuilder[corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaim, PVCBuilder] } -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecs(t, "ConsensusSet Suite") +func NewPVCBuilder(namespace, name string) *PVCBuilder { + builder := &PVCBuilder{} + builder.init(namespace, name, &corev1.PersistentVolumeClaim{}, builder) + return builder } -var _ = BeforeSuite(func() { - go func() { - defer GinkgoRecover() - }() -}) - -var _ = AfterSuite(func() { -}) +func (builder *PVCBuilder) SetResources(resources corev1.ResourceRequirements) *PVCBuilder { + builder.get().Spec.Resources = resources + return builder +} diff --git a/internal/controller/builder/builder_pvc_test.go b/internal/controller/builder/builder_pvc_test.go new file mode 100644 index 00000000000..518fe0b1e5b --- /dev/null +++ b/internal/controller/builder/builder_pvc_test.go @@ -0,0 +1,49 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package builder + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +var _ = Describe("pvc builder", func() { + It("should work well", func() { + const ( + name = "foo" + ns = "default" + ) + resources := corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + "CPU": resource.MustParse("500m"), + }, + } + pvc := NewPVCBuilder(ns, name). + SetResources(resources). + GetObject() + + Expect(pvc.Name).Should(Equal(name)) + Expect(pvc.Namespace).Should(Equal(ns)) + Expect(pvc.Spec.Resources).Should(Equal(resources)) + }) +}) diff --git a/internal/controller/builder/builder_consensus_set.go b/internal/controller/builder/builder_replicated_state_machine.go similarity index 50% rename from internal/controller/builder/builder_consensus_set.go rename to internal/controller/builder/builder_replicated_state_machine.go index cb73be60f00..509d691fcd2 100644 --- a/internal/controller/builder/builder_consensus_set.go +++ b/internal/controller/builder/builder_replicated_state_machine.go @@ -25,17 +25,17 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) -type ConsensusSetBuilder struct { - BaseBuilder[workloads.ConsensusSet, *workloads.ConsensusSet, ConsensusSetBuilder] +type ReplicatedStateMachineBuilder struct { + BaseBuilder[workloads.ReplicatedStateMachine, *workloads.ReplicatedStateMachine, ReplicatedStateMachineBuilder] } -func NewConsensusSetBuilder(namespace, name string) *ConsensusSetBuilder { - builder := &ConsensusSetBuilder{} +func NewReplicatedStateMachineBuilder(namespace, name string) *ReplicatedStateMachineBuilder { + builder := &ReplicatedStateMachineBuilder{} builder.init(namespace, name, - &workloads.ConsensusSet{ - Spec: workloads.ConsensusSetSpec{ + &workloads.ReplicatedStateMachine{ + Spec: workloads.ReplicatedStateMachineSpec{ Replicas: 1, - Roles: []workloads.ConsensusRole{ + Roles: []workloads.ReplicaRole{ { Name: "leader", AccessMode: workloads.ReadWriteMode, @@ -49,34 +49,44 @@ func NewConsensusSetBuilder(namespace, name string) *ConsensusSetBuilder { return builder } -func (builder *ConsensusSetBuilder) SetReplicas(replicas int32) *ConsensusSetBuilder { +func (builder *ReplicatedStateMachineBuilder) SetReplicas(replicas int32) *ReplicatedStateMachineBuilder { builder.get().Spec.Replicas = replicas return builder } -func (builder *ConsensusSetBuilder) SetRoles(roles []workloads.ConsensusRole) *ConsensusSetBuilder { +func (builder *ReplicatedStateMachineBuilder) SetRoles(roles []workloads.ReplicaRole) *ReplicatedStateMachineBuilder { builder.get().Spec.Roles = roles return builder } -func (builder *ConsensusSetBuilder) SetTemplate(template corev1.PodTemplateSpec) *ConsensusSetBuilder { +func (builder *ReplicatedStateMachineBuilder) SetTemplate(template corev1.PodTemplateSpec) *ReplicatedStateMachineBuilder { builder.get().Spec.Template = template return builder } -func (builder *ConsensusSetBuilder) SetObservationActions(actions []workloads.Action) *ConsensusSetBuilder { +func (builder *ReplicatedStateMachineBuilder) SetObservationActions(actions []workloads.Action) *ReplicatedStateMachineBuilder { builder.get().Spec.RoleObservation.ObservationActions = actions return builder } -func (builder *ConsensusSetBuilder) AddObservationAction(action workloads.Action) *ConsensusSetBuilder { +func (builder *ReplicatedStateMachineBuilder) AddObservationAction(action workloads.Action) *ReplicatedStateMachineBuilder { actions := builder.get().Spec.RoleObservation.ObservationActions actions = 
append(actions, action) builder.get().Spec.RoleObservation.ObservationActions = actions return builder } -func (builder *ConsensusSetBuilder) SetService(service corev1.ServiceSpec) *ConsensusSetBuilder { +func (builder *ReplicatedStateMachineBuilder) SetService(service corev1.ServiceSpec) *ReplicatedStateMachineBuilder { builder.get().Spec.Service = service return builder } + +func (builder *ReplicatedStateMachineBuilder) SetMembershipReconfiguration(reconfiguration workloads.MembershipReconfiguration) *ReplicatedStateMachineBuilder { + builder.get().Spec.MembershipReconfiguration = &reconfiguration + return builder +} + +func (builder *ReplicatedStateMachineBuilder) SetCredential(credential workloads.Credential) *ReplicatedStateMachineBuilder { + builder.get().Spec.Credential = &credential + return builder +} diff --git a/internal/controller/builder/builder_consensus_set_test.go b/internal/controller/builder/builder_replicated_state_machine_test.go similarity index 59% rename from internal/controller/builder/builder_consensus_set_test.go rename to internal/controller/builder/builder_replicated_state_machine_test.go index bd3ec9d2fa0..b02b0268ca7 100644 --- a/internal/controller/builder/builder_consensus_set_test.go +++ b/internal/controller/builder/builder_replicated_state_machine_test.go @@ -28,7 +28,7 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) -var _ = Describe("consensus_set builder", func() { +var _ = Describe("replicated_state_machine builder", func() { It("should work well", func() { const ( name = "foo" @@ -36,12 +36,18 @@ var _ = Describe("consensus_set builder", func() { replicas = int32(5) port = int32(12345) ) - role := workloads.ConsensusRole{ + role := workloads.ReplicaRole{ Name: "foo", AccessMode: workloads.ReadWriteMode, IsLeader: true, CanVote: true, } + reconfiguration := workloads.MembershipReconfiguration{ + SwitchoverAction: &workloads.Action{ + Image: name, + Command: []string{"bar"}, + }, + } pod := NewPodBuilder(ns, "foo"). AddContainer(corev1.Container{ Name: "foo", @@ -77,24 +83,34 @@ var _ = Describe("consensus_set builder", func() { }, }, } - csSet := NewConsensusSetBuilder(ns, name). + credential := workloads.Credential{ + Username: workloads.CredentialVar{Value: "foo"}, + Password: workloads.CredentialVar{Value: "bar"}, + } + rsm := NewReplicatedStateMachineBuilder(ns, name). SetReplicas(replicas). - SetRoles([]workloads.ConsensusRole{role}). + SetRoles([]workloads.ReplicaRole{role}). + SetMembershipReconfiguration(reconfiguration). SetTemplate(template). SetObservationActions(actions). AddObservationAction(action). SetService(service). + SetCredential(credential). 
			GetObject()
 
-		Expect(csSet.Name).Should(Equal(name))
-		Expect(csSet.Namespace).Should(Equal(ns))
-		Expect(csSet.Spec.Replicas).Should(Equal(replicas))
-		Expect(len(csSet.Spec.Roles)).Should(Equal(1))
-		Expect(csSet.Spec.Roles[0]).Should(Equal(role))
-		Expect(csSet.Spec.Template).Should(Equal(template))
-		Expect(len(csSet.Spec.RoleObservation.ObservationActions)).Should(Equal(2))
-		Expect(csSet.Spec.RoleObservation.ObservationActions[0]).Should(Equal(actions[0]))
-		Expect(csSet.Spec.RoleObservation.ObservationActions[1]).Should(Equal(action))
-		Expect(csSet.Spec.Service).Should(Equal(service))
+		Expect(rsm.Name).Should(Equal(name))
+		Expect(rsm.Namespace).Should(Equal(ns))
+		Expect(rsm.Spec.Replicas).Should(Equal(replicas))
+		Expect(len(rsm.Spec.Roles)).Should(Equal(1))
+		Expect(rsm.Spec.Roles[0]).Should(Equal(role))
+		Expect(rsm.Spec.MembershipReconfiguration).ShouldNot(BeNil())
+		Expect(*rsm.Spec.MembershipReconfiguration).Should(Equal(reconfiguration))
+		Expect(rsm.Spec.Template).Should(Equal(template))
+		Expect(len(rsm.Spec.RoleObservation.ObservationActions)).Should(Equal(2))
+		Expect(rsm.Spec.RoleObservation.ObservationActions[0]).Should(Equal(actions[0]))
+		Expect(rsm.Spec.RoleObservation.ObservationActions[1]).Should(Equal(action))
+		Expect(rsm.Spec.Service).Should(Equal(service))
+		Expect(rsm.Spec.Credential).ShouldNot(BeNil())
+		Expect(*rsm.Spec.Credential).Should(Equal(credential))
 	})
 })
diff --git a/internal/controller/graph/dag.go b/internal/controller/graph/dag.go
index f5ec859a272..0b8073c41d4 100644
--- a/internal/controller/graph/dag.go
+++ b/internal/controller/graph/dag.go
@@ -22,6 +22,7 @@ package graph
 import (
 	"errors"
 	"fmt"
+	"sort"
 )
 
 type DAG struct {
@@ -148,7 +149,7 @@ func (d *DAG) WalkTopoOrder(walkFunc WalkFunc) error {
 	if err := d.validate(); err != nil {
 		return err
 	}
-	orders := d.topologicalOrder(false)
+	orders := d.topologicalOrder(false, nil)
 	for _, v := range orders {
 		if err := walkFunc(v); err != nil {
 			return err
@@ -162,7 +163,7 @@ func (d *DAG) WalkReverseTopoOrder(walkFunc WalkFunc) error {
 	if err := d.validate(); err != nil {
 		return err
 	}
-	orders := d.topologicalOrder(true)
+	orders := d.topologicalOrder(true, nil)
 	for _, v := range orders {
 		if err := walkFunc(v); err != nil {
 			return err
@@ -208,6 +209,31 @@ func (d *DAG) WalkBFS(walkFunc WalkFunc) error {
 	return nil
 }
 
+// Equals tells whether two DAGs are equal.
+// `less` tells whether vertex 'v1' is less than vertex 'v2';
+// `less` should return false if 'v1' equals 'v2'.
+func (d *DAG) Equals(other *DAG, less func(v1, v2 Vertex) bool) bool {
+	if other == nil || less == nil {
+		return false
+	}
+	// sort both DAGs in topological order.
+	// a DAG may have more than one topological order; func 'less' eliminates
+	// that randomness, so only one deterministic order is generated.
+	vertices1 := d.topologicalOrder(false, less)
+	vertices2 := other.topologicalOrder(false, less)
+
+	// compare them
+	if len(vertices1) != len(vertices2) {
+		return false
+	}
+	for i := range vertices1 {
+		if less(vertices1[i], vertices2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
 // Root returns the root vertex, which has no in-adjacent vertex.
// our DAG should have one and only one root vertex func (d *DAG) Root() Vertex { @@ -295,7 +321,7 @@ func (d *DAG) validate() error { // topologicalOrder returns a vertex list that is in topology order // 'd' MUST be a legal DAG -func (d *DAG) topologicalOrder(reverse bool) []Vertex { +func (d *DAG) topologicalOrder(reverse bool, less func(v1, v2 Vertex) bool) []Vertex { // orders is what we want, a (reverse) topological order of this DAG orders := make([]Vertex, 0) @@ -314,13 +340,24 @@ func (d *DAG) topologicalOrder(reverse bool) []Vertex { } else { adjacent = d.inAdj(v) } + if less != nil { + sort.SliceStable(adjacent, func(i, j int) bool { + return less(adjacent[i], adjacent[j]) + }) + } for _, vertex := range adjacent { walk(vertex) } walked[v] = true orders = append(orders, v) } - for v := range d.vertices { + vertexLst := d.Vertices() + if less != nil { + sort.SliceStable(vertexLst, func(i, j int) bool { + return less(vertexLst[i], vertexLst[j]) + }) + } + for _, v := range vertexLst { walk(v) } return orders diff --git a/internal/controller/graph/dag_test.go b/internal/controller/graph/dag_test.go index e02f403df63..5303298922a 100644 --- a/internal/controller/graph/dag_test.go +++ b/internal/controller/graph/dag_test.go @@ -122,3 +122,90 @@ func TestRemoveVertex(t *testing.T) { t.Error("unexpected edges", len(dag.edges)) } } + +func TestEquals(t *testing.T) { + d1 := NewDAG() + d2 := NewDAG() + for i := 0; i < 13; i++ { + d1.AddVertex(i) + d2.AddVertex(12 - i) + } + d1.Connect(2, 3) + d1.Connect(0, 6) + d1.Connect(0, 1) + d1.Connect(2, 0) + d1.Connect(11, 12) + d1.Connect(9, 12) + d1.Connect(9, 10) + d1.Connect(9, 11) + d1.Connect(3, 5) + d1.Connect(8, 7) + d1.Connect(5, 4) + d1.Connect(0, 5) + d1.Connect(6, 4) + d1.Connect(6, 9) + d1.Connect(7, 6) + d1.Connect(7, 2) + d1.Connect(3, 0) + d1.Connect(12, 10) + d1.Connect(10, 1) + d1.Connect(1, 5) + + // add edges in reverse order + d2.Connect(1, 5) + d2.Connect(10, 1) + d2.Connect(12, 10) + d2.Connect(3, 0) + d2.Connect(7, 2) + d2.Connect(7, 6) + d2.Connect(6, 9) + d2.Connect(6, 4) + d2.Connect(0, 5) + d2.Connect(5, 4) + d2.Connect(8, 7) + d2.Connect(3, 5) + d2.Connect(9, 11) + d2.Connect(9, 10) + d2.Connect(9, 12) + d2.Connect(11, 12) + d2.Connect(2, 0) + d2.Connect(0, 1) + d2.Connect(0, 6) + d2.Connect(2, 3) + + less := func(v1, v2 Vertex) bool { + val1, _ := v1.(int) + val2, _ := v2.(int) + return val1 < val2 + } + if !d1.Equals(d2, less) { + t.Error("equals test failed") + } + + d1 = NewDAG() + d2 = NewDAG() + + d1.AddVertex(0) + d1.AddVertex(1) + d1.AddVertex(2) + d1.AddVertex(3) + d1.AddVertex(4) + d2.AddVertex(0) + d2.AddVertex(2) + d2.AddVertex(3) + d2.AddVertex(1) + d2.AddVertex(4) + + d1.Connect(0, 1) + d1.Connect(0, 2) + d1.Connect(0, 3) + d1.Connect(0, 4) + d2.Connect(0, 2) + d2.Connect(0, 3) + d2.Connect(0, 4) + d2.Connect(0, 1) + + if !d1.Equals(d2, less) { + t.Error("equals test failed") + } +} diff --git a/internal/controller/model/transform_types.go b/internal/controller/model/transform_types.go index ec7e1373140..aa87f97078e 100644 --- a/internal/controller/model/transform_types.go +++ b/internal/controller/model/transform_types.go @@ -49,16 +49,6 @@ const ( STATUS = Action("STATUS") ) -const ( - AppInstanceLabelKey = "app.kubernetes.io/instance" - KBManagedByKey = "apps.kubeblocks.io/managed-by" - RoleLabelKey = "kubeblocks.io/role" - ConsensusSetAccessModeLabelKey = "cs.apps.kubeblocks.io/access-mode" -) - -// RequeueDuration default reconcile requeue after duration -var RequeueDuration = time.Millisecond * 100 
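The TestEquals cases above exercise the comparator-based equality added to dag.go: a DAG generally admits many valid topological orders, so Equals relies on `less` imposing a strict total order over vertices, pinning both DAGs to one deterministic order before the element-wise comparison. A smaller usage sketch of the same API (int vertices, as in dag_test.go; assumes this repo's graph package import path):

package main

import (
	"fmt"

	"github.com/apecloud/kubeblocks/internal/controller/graph"
)

func main() {
	// less must be a strict order: it returns false for equal vertices.
	less := func(v1, v2 graph.Vertex) bool {
		i1, _ := v1.(int)
		i2, _ := v2.(int)
		return i1 < i2
	}

	d1, d2 := graph.NewDAG(), graph.NewDAG()
	for i := 0; i < 3; i++ {
		d1.AddVertex(i)
		d2.AddVertex(2 - i) // same vertex set, reverse insertion order
	}
	d1.Connect(0, 1)
	d1.Connect(1, 2)
	d2.Connect(1, 2) // same edges, added in a different order
	d2.Connect(0, 1)

	// Both DAGs sort into the identical topological order, so they compare equal.
	fmt.Println(d1.Equals(d2, less)) // true
}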
-
 type GVKName struct {
 	gvk      schema.GroupVersionKind
 	ns, name string
 }
diff --git a/internal/controller/rsm/doc.go b/internal/controller/rsm/doc.go
new file mode 100644
index 00000000000..1d95ff5fe68
--- /dev/null
+++ b/internal/controller/rsm/doc.go
@@ -0,0 +1,33 @@
+/*
+Copyright (C) 2022-2023 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .
+*/
+
+/*
+Package rsm is a general-purpose component for hosting role-based stateful workloads (such as databases).
+RSM stands for Replicated State Machine, named for the fact that these workloads solve state-replication problems.
+
+The K8s native StatefulSet handles stateful workloads well,
+but more work is needed when the workload pods have roles (leader/follower in etcd, primary/secondary in PostgreSQL, etc.).
+
+RSM adds an abstraction layer above StatefulSet and provides:
+1. role-based update strategies (Serial/Parallel/BestEffortParallel)
+2. role-based access modes (ReadWrite/Readonly/None)
+3. automatic switchover
+4. membership reconfiguration
+*/
+package rsm
diff --git a/internal/controller/consensusset/enqueue_ancestor.go b/internal/controller/rsm/enqueue_ancestor.go
similarity index 78%
rename from internal/controller/consensusset/enqueue_ancestor.go
rename to internal/controller/rsm/enqueue_ancestor.go
index 74ef2c20526..a25ec971baa 100644
--- a/internal/controller/consensusset/enqueue_ancestor.go
+++ b/internal/controller/rsm/enqueue_ancestor.go
@@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License
 along with this program. If not, see .
 */
 
-package consensusset
+package rsm
 
 import (
 	"context"
@@ -49,11 +49,11 @@ var log = logf.FromContext(context.Background()).WithName("eventhandler").WithNa
 
 // EnqueueRequestForAncestor enqueues Requests for the ancestor object.
 // E.g. the ancestor object creates the StatefulSet/Deployment which then creates the Pod.
 //
-// If a ConsensusSet creates Pods, users may reconcile the ConsensusSet in response to Pod Events using:
+// If a ReplicatedStateMachine creates Pods, users may reconcile the ReplicatedStateMachine in response to Pod Events using:
 //
 // - a source.Kind Source with Type of Pod.
 //
-// - a EnqueueRequestForAncestor EventHandler with an OwnerType of ConsensusSet and UpToLevel set to 2.
+// - an EnqueueRequestForAncestor EventHandler with an OwnerType of ReplicatedStateMachine and UpToLevel set to 2.
 //
 // If source kind is corev1.Event, Event.InvolvedObject will be used as the source kind
 type EnqueueRequestForAncestor struct {
@@ -66,8 +66,16 @@ type EnqueueRequestForAncestor struct {
 	// find event source up to UpToLevel
 	UpToLevel int
 
+	// InTypes specifies the range in which to look for the ancestor:
+	// every ancestor's type along the lookup path should be in InTypes.
+	// OwnerType is always included.
+	// nil means only look for the OwnerType.
+	InTypes []runtime.Object
+
 	// groupKind is the cached Group and Kind from OwnerType
-	groupKind schema.GroupKind
+	groupKind *schema.GroupKind
+
+	// ancestorGroupKinds is the cached Groups and Kinds from InTypes
+	ancestorGroupKinds []schema.GroupKind
 
 	// mapper maps GroupVersionKinds to Resources
 	mapper meta.RESTMapper
@@ -115,21 +123,46 @@ func (e *EnqueueRequestForAncestor) Generic(evt event.GenericEvent, q workqueue.
 
 // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns an error
 // if the OwnerType could not be parsed using the scheme.
 func (e *EnqueueRequestForAncestor) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error {
-	// Get the kinds of the type
-	kinds, _, err := scheme.ObjectKinds(e.OwnerType)
+	gk, err := e.parseTypeGroupKind(e.OwnerType, scheme)
 	if err != nil {
-		log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType))
 		return err
 	}
+	// Cache the Group and Kind for the OwnerType
+	e.groupKind = gk
+	return nil
+}
+
+// parseInTypesGroupKind parses the InTypes into Groups and Kinds and caches the result. Returns an error
+// if any of the InTypes could not be parsed using the scheme.
+func (e *EnqueueRequestForAncestor) parseInTypesGroupKind(scheme *runtime.Scheme) error {
+	if e.groupKind != nil {
+		e.ancestorGroupKinds = append(e.ancestorGroupKinds, *e.groupKind)
+	}
+	for _, inType := range e.InTypes {
+		gk, err := e.parseTypeGroupKind(inType, scheme)
+		if err != nil {
+			return err
+		}
+		// Cache the Group and Kind for the inType
+		e.ancestorGroupKinds = append(e.ancestorGroupKinds, *gk)
+	}
+	return nil
+}
+
+func (e *EnqueueRequestForAncestor) parseTypeGroupKind(object runtime.Object, scheme *runtime.Scheme) (*schema.GroupKind, error) {
+	// Get the kinds of the type
+	kinds, _, err := scheme.ObjectKinds(object)
+	if err != nil {
+		log.Error(err, "Could not get ObjectKinds", "object", fmt.Sprintf("%T", object))
+		return nil, err
+	}
 	// Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions.
 	if len(kinds) != 1 {
-		err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds)
-		log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds)
-		return err
+		err := fmt.Errorf("expected exactly 1 kind for object %T, but found %s kinds", object, kinds)
+		log.Error(nil, "expected exactly 1 kind for object", "object", fmt.Sprintf("%T", object), "kinds", kinds)
+		return nil, err
 	}
-	// Cache the Group and Kind for the OwnerType
-	e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind}
-	return nil
+	return &schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind}, nil
 }
 
 // getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile
@@ -147,7 +180,7 @@ func (e *EnqueueRequestForAncestor) getOwnerReconcileRequest(obj client.Object,
 	ctx := context.Background()
 	ref, err := e.getOwnerUpTo(ctx, object, e.UpToLevel, scheme)
 	if err != nil {
-		log.Info("cloud not find top object",
+		log.Info("could not find ancestor object",
 			"source object gvk", object.GetObjectKind().GroupVersionKind(),
 			"name", object.GetName(),
 			"up to level", e.UpToLevel,
@@ -155,7 +188,7 @@
 		return
 	}
 	if ref == nil {
-		log.Info("cloud not find top object",
+		log.Info("could not find ancestor object",
 			"source object gvk", object.GetObjectKind().GroupVersionKind(),
 			"name", object.GetName(),
 			"up to level", e.UpToLevel)
@@ -181,7 +214,7 @@
 	}}
 
 	// if owner is not namespaced then we should set the namespace to the empty
-	mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version)
+	mapping, err := e.mapper.RESTMapping(*e.groupKind, refGV.Version)
 	if err != nil {
 		log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind)
 		return
@@ -221,14 +254,17 @@
 }
 
 // getOwnerUpTo gets the owner of object up to upToLevel.
-// E.g. If ConsensusSet creates the StatefulSet which then creates the Pod,
-// if the object is the Pod, then set upToLevel to 2 if you want to find the ConsensusSet.
+// E.g. If ReplicatedStateMachine creates the StatefulSet which then creates the Pod,
+// if the object is the Pod, then set upToLevel to 2 if you want to find the ReplicatedStateMachine.
 // Each level of ownership should be a controller-relationship (i.e. controller=true in ownerReferences).
 // nil is returned if no owner is found at any level.
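Before the getOwnerUpTo implementation below, it may help to see how this handler is meant to be wired up. This is a hypothetical wiring sketch only: the real controller setup lives elsewhere in this PR, the field values mirror the handler construction in enqueue_ancestor_test.go, and the Watches signature assumed here is the controller-runtime v0.14-era API this codebase uses:

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/source"

	workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
	"github.com/apecloud/kubeblocks/internal/controller/rsm"
)

// Watch Pods, but enqueue the ReplicatedStateMachine two ownership levels up
// (Pod -> StatefulSet -> RSM), traversing only StatefulSet owners on the way.
func (r *ReplicatedStateMachineReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&workloads.ReplicatedStateMachine{}).
		Watches(&source.Kind{Type: &corev1.Pod{}},
			&rsm.EnqueueRequestForAncestor{
				Client:    mgr.GetClient(),
				OwnerType: &workloads.ReplicatedStateMachine{},
				UpToLevel: 2,
				InTypes:   []runtime.Object{&appsv1.StatefulSet{}},
			}).
		Complete(r)
}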
func (e *EnqueueRequestForAncestor) getOwnerUpTo(ctx context.Context, object client.Object, upToLevel int, scheme runtime.Scheme) (*metav1.OwnerReference, error) { if upToLevel <= 0 { return nil, nil } + if object == nil { + return nil, nil + } ownerRef := metav1.GetControllerOf(object) if ownerRef == nil { return nil, nil @@ -253,6 +289,9 @@ func (e *EnqueueRequestForAncestor) getObjectByOwnerRef(ctx context.Context, own Version: gv.Version, Kind: ownerRef.Kind, } + if !e.inAncestorRange(gvk) { + return nil, nil + } objectRT, err := scheme.New(gvk) if err != nil { return nil, err @@ -279,11 +318,23 @@ func (e *EnqueueRequestForAncestor) getObjectByOwnerRef(ctx context.Context, own return object, nil } +func (e *EnqueueRequestForAncestor) inAncestorRange(gvk schema.GroupVersionKind) bool { + for _, groupKind := range e.ancestorGroupKinds { + if gvk.Group == groupKind.Group && gvk.Kind == groupKind.Kind { + return true + } + } + return false +} + var _ inject.Scheme = &EnqueueRequestForAncestor{} // InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForAncestor. func (e *EnqueueRequestForAncestor) InjectScheme(s *runtime.Scheme) error { - return e.parseOwnerTypeGroupKind(s) + if err := e.parseOwnerTypeGroupKind(s); err != nil { + return err + } + return e.parseInTypesGroupKind(s) } var _ inject.Mapper = &EnqueueRequestForAncestor{} diff --git a/internal/controller/rsm/enqueue_ancestor_test.go b/internal/controller/rsm/enqueue_ancestor_test.go new file mode 100644 index 00000000000..0f5576f1732 --- /dev/null +++ b/internal/controller/rsm/enqueue_ancestor_test.go @@ -0,0 +1,399 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/golang/mock/gomock" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/model" +) + +func init() { + model.AddScheme(workloads.AddToScheme) +} + +var _ = Describe("enqueue ancestor", func() { + scheme := model.GetScheme() + var handler *EnqueueRequestForAncestor + + buildAncestorTree := func() (*workloads.ReplicatedStateMachine, *appsv1.StatefulSet, *corev1.Pod) { + ancestorL2APIVersion := "workloads.kubeblocks.io/v1alpha1" + ancestorL2Kind := "ReplicatedStateMachine" + ancestorL2Name := "ancestor-level-2" + ancestorL1APIVersion := "apps/v1" + ancestorL1Kind := "StatefulSet" + ancestorL1Name := "ancestor-level-1" + objectName := ancestorL1Name + "-0" + + ancestorLevel2 := builder.NewReplicatedStateMachineBuilder(namespace, ancestorL2Name).GetObject() + ancestorLevel2.APIVersion = ancestorL2APIVersion + ancestorLevel2.Kind = ancestorL2Kind + ancestorLevel1 := builder.NewStatefulSetBuilder(namespace, ancestorL1Name). + SetOwnerReferences(ancestorL2APIVersion, ancestorL2Kind, ancestorLevel2). + GetObject() + ancestorLevel1.APIVersion = ancestorL1APIVersion + ancestorLevel1.Kind = ancestorL1Kind + object := builder.NewPodBuilder(namespace, objectName). + SetOwnerReferences(ancestorL1APIVersion, ancestorL1Kind, ancestorLevel1). 
+ GetObject() + + return ancestorLevel2, ancestorLevel1, object + } + + BeforeEach(func() { + handler = &EnqueueRequestForAncestor{ + Client: k8sMock, + OwnerType: &workloads.ReplicatedStateMachine{}, + UpToLevel: 2, + InTypes: []runtime.Object{&appsv1.StatefulSet{}}, + } + }) + + Context("parseOwnerTypeGroupKind", func() { + It("should work well", func() { + Expect(handler.parseOwnerTypeGroupKind(scheme)).Should(Succeed()) + Expect(handler.groupKind.Group).Should(Equal("workloads.kubeblocks.io")) + Expect(handler.groupKind.Kind).Should(Equal("ReplicatedStateMachine")) + }) + }) + + Context("parseInTypesGroupKind", func() { + It("should work well", func() { + Expect(handler.parseInTypesGroupKind(scheme)).Should(Succeed()) + Expect(handler.ancestorGroupKinds).Should(HaveLen(1)) + Expect(handler.ancestorGroupKinds[0].Group).Should(Equal("apps")) + Expect(handler.ancestorGroupKinds[0].Kind).Should(Equal("StatefulSet")) + }) + }) + + Context("getObjectByOwnerRef", func() { + BeforeEach(func() { + Expect(handler.InjectScheme(scheme)).Should(Succeed()) + Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) + }) + + It("should return err if groupVersion parsing error", func() { + wrongAPIVersion := "wrong/group/version" + ownerRef := metav1.OwnerReference{ + APIVersion: wrongAPIVersion, + } + _, err := handler.getObjectByOwnerRef(ctx, namespace, ownerRef, *scheme) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(ContainSubstring(wrongAPIVersion)) + }) + + It("should return nil if ancestor's type out of range", func() { + ownerRef := metav1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "foo", + UID: "bar", + } + object, err := handler.getObjectByOwnerRef(ctx, namespace, ownerRef, *scheme) + Expect(err).Should(BeNil()) + Expect(object).Should(BeNil()) + }) + + It("should return the owner object", func() { + ownerName := "foo" + ownerUID := types.UID("bar") + ownerRef := metav1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "StatefulSet", + Name: ownerName, + UID: ownerUID, + } + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, _ client.ObjectKey, obj *appsv1.StatefulSet, _ ...client.ListOption) error { + obj.Name = ownerName + obj.UID = ownerUID + return nil + }).Times(1) + object, err := handler.getObjectByOwnerRef(ctx, namespace, ownerRef, *scheme) + Expect(err).Should(BeNil()) + Expect(object).ShouldNot(BeNil()) + Expect(object.GetName()).Should(Equal(ownerName)) + Expect(object.GetUID()).Should(Equal(ownerUID)) + }) + }) + + Context("getOwnerUpTo", func() { + BeforeEach(func() { + Expect(handler.InjectScheme(scheme)).Should(Succeed()) + Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) + }) + + It("should work well", func() { + By("set upToLevel to 0") + ownerRef, err := handler.getOwnerUpTo(ctx, nil, 0, *scheme) + Expect(err).Should(BeNil()) + Expect(ownerRef).Should(BeNil()) + + By("set object to nil") + ownerRef, err = handler.getOwnerUpTo(ctx, nil, handler.UpToLevel, *scheme) + Expect(err).Should(BeNil()) + Expect(ownerRef).Should(BeNil()) + + By("builder ancestor tree") + ancestorLevel2, ancestorLevel1, object := buildAncestorTree() + + By("set upToLevel to 1") + ownerRef, err = handler.getOwnerUpTo(ctx, object, 1, *scheme) + Expect(err).Should(BeNil()) + Expect(ownerRef).ShouldNot(BeNil()) + Expect(ownerRef.APIVersion).Should(Equal(ancestorLevel1.APIVersion)) + Expect(ownerRef.Kind).Should(Equal(ancestorLevel1.Kind)) + Expect(ownerRef.Name).Should(Equal(ancestorLevel1.Name)) + Expect(ownerRef.UID).Should(Equal(ancestorLevel1.UID)) + + By("set upToLevel to 2") + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { + sts.Namespace = objKey.Namespace + sts.Name = objKey.Name + sts.OwnerReferences = ancestorLevel1.OwnerReferences + return nil + }).Times(1) + ownerRef, err = handler.getOwnerUpTo(ctx, object, handler.UpToLevel, *scheme) + Expect(err).Should(BeNil()) + Expect(ownerRef).ShouldNot(BeNil()) + Expect(ownerRef.APIVersion).Should(Equal(ancestorLevel2.APIVersion)) + Expect(ownerRef.Kind).Should(Equal(ancestorLevel2.Kind)) + Expect(ownerRef.Name).Should(Equal(ancestorLevel2.Name)) + Expect(ownerRef.UID).Should(Equal(ancestorLevel2.UID)) + }) + }) + + Context("getSourceObject", func() { + BeforeEach(func() { + Expect(handler.InjectScheme(scheme)).Should(Succeed()) + Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) + }) + + It("should work well", func() { + By("build a non-event object") + name := "foo" + uid := types.UID("bar") + object1 := builder.NewPodBuilder(namespace, name).SetUID(uid).GetObject() + objectSrc1, err := handler.getSourceObject(object1) + Expect(err).Should(BeNil()) + Expect(objectSrc1).Should(Equal(object1)) + + By("build an event object") + handler.InTypes = append(handler.InTypes, &corev1.Pod{}) + Expect(handler.InjectScheme(scheme)).Should(Succeed()) + objectRef := corev1.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: namespace, + Name: object1.Name, + UID: object1.UID, + } + object2 := builder.NewEventBuilder(namespace, "foo"). + SetInvolvedObject(objectRef). + GetObject() + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &corev1.Pod{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *corev1.Pod, _ ...client.ListOptions) error { + obj.Name = objKey.Name + obj.Namespace = objKey.Namespace + obj.UID = objectRef.UID + return nil + }).Times(1) + objectSrc2, err := handler.getSourceObject(object2) + Expect(err).Should(BeNil()) + Expect(objectSrc2).ShouldNot(BeNil()) + Expect(objectSrc2.GetName()).Should(Equal(object1.Name)) + Expect(objectSrc2.GetNamespace()).Should(Equal(object1.Namespace)) + Expect(objectSrc2.GetUID()).Should(Equal(object1.UID)) + }) + }) + + Context("getOwnerReconcileRequest", func() { + BeforeEach(func() { + Expect(handler.InjectScheme(scheme)).Should(Succeed()) + Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) + }) + + It("should work well", func() { + By("build ancestor tree") + ancestorLevel2, ancestorLevel1, object := buildAncestorTree() + + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { + sts.Namespace = objKey.Namespace + sts.Name = objKey.Name + sts.OwnerReferences = ancestorLevel1.OwnerReferences + return nil + }).Times(1) + + By("get object with ancestors") + result := make(map[reconcile.Request]empty) + handler.getOwnerReconcileRequest(object, result) + Expect(result).Should(HaveLen(1)) + for request := range result { + Expect(request.Namespace).Should(Equal(ancestorLevel2.Namespace)) + Expect(request.Name).Should(Equal(ancestorLevel2.Name)) + } + + By("set obj not exist") + wrongAPIVersion := "wrong/api/version" + object.OwnerReferences[0].APIVersion = wrongAPIVersion + result = make(map[reconcile.Request]empty) + handler.getOwnerReconcileRequest(object, result) + Expect(result).Should(HaveLen(0)) + + By("set level 1 ancestor's owner not exist") + object.OwnerReferences[0].APIVersion = ancestorLevel1.APIVersion + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { + sts.Namespace = objKey.Namespace + sts.Name = objKey.Name + return nil + }).Times(1) + result = make(map[reconcile.Request]empty) + handler.getOwnerReconcileRequest(object, result) + Expect(result).Should(HaveLen(0)) + }) + }) + + Context("handler interface", func() { + BeforeEach(func() { + Expect(handler.InjectScheme(scheme)).Should(Succeed()) + Expect(handler.InjectMapper(newFakeMapper())).Should(Succeed()) + }) + + It("should work well", func() { + By("build events and queue") + queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "enqueue-ancestor-test") + ancestorLevel2, ancestorLevel1, object := buildAncestorTree() + createEvent := event.CreateEvent{Object: object} + updateEvent := event.UpdateEvent{ObjectOld: object, ObjectNew: object} + deleteEvent := event.DeleteEvent{Object: object} + genericEvent := event.GenericEvent{Object: object} + + cases := []struct { + name string + testFunc func() + getTimes int + }{ + { + name: "Create", + testFunc: func() { handler.Create(createEvent, queue) }, + getTimes: 1, + }, + { + name: "Update", + testFunc: func() { handler.Update(updateEvent, queue) }, + getTimes: 2, + }, + { + name: "Delete", + testFunc: func() { handler.Delete(deleteEvent, queue) }, + getTimes: 1, + }, + { + name: "Generic", + testFunc: func() { handler.Generic(genericEvent, queue) }, + getTimes: 1, + }, + } + for _, c := range cases { + By(fmt.Sprintf("test %s interface", c.name)) + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &appsv1.StatefulSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, sts *appsv1.StatefulSet, _ ...client.ListOptions) error { + sts.Namespace = objKey.Namespace + sts.Name = objKey.Name + sts.OwnerReferences = ancestorLevel1.OwnerReferences + return nil + }).Times(c.getTimes) + c.testFunc() + item, shutdown := queue.Get() + Expect(shutdown).Should(BeFalse()) + request, ok := item.(reconcile.Request) + Expect(ok).Should(BeTrue()) + Expect(request.Namespace).Should(Equal(ancestorLevel2.Namespace)) + Expect(request.Name).Should(Equal(ancestorLevel2.Name)) + queue.Done(item) + queue.Forget(item) + } + + queue.ShutDown() + }) + }) +}) + +type fakeMapper struct{} + +func (f *fakeMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return schema.GroupVersionKind{}, nil +} + +func (f *fakeMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return nil, nil +} + +func (f *fakeMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return schema.GroupVersionResource{}, nil +} + +func (f *fakeMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return nil, nil +} + +func (f *fakeMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return &meta.RESTMapping{Scope: meta.RESTScopeNamespace}, nil +} + +func (f *fakeMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return nil, nil +} + +func (f *fakeMapper) ResourceSingularizer(resource string) (singular string, err error) { + return "", nil +} + +func newFakeMapper() meta.RESTMapper { + return &fakeMapper{} +} diff --git a/internal/controller/consensusset/plan_builder.go b/internal/controller/rsm/plan_builder.go similarity index 76% rename from 
internal/controller/consensusset/plan_builder.go rename to internal/controller/rsm/plan_builder.go index a2e4bb141f6..98decbdd5ba 100644 --- a/internal/controller/consensusset/plan_builder.go +++ b/internal/controller/rsm/plan_builder.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package rsm import ( "errors" @@ -36,46 +36,50 @@ import ( intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" ) -type csSetPlanBuilder struct { +type PlanBuilder struct { req ctrl.Request cli client.Client - transCtx *CSSetTransformContext + transCtx *rsmTransformContext transformers graph.TransformerChain } -type csSetPlan struct { +var _ graph.PlanBuilder = &PlanBuilder{} + +type Plan struct { dag *graph.DAG walkFunc graph.WalkFunc cli client.Client - transCtx *CSSetTransformContext + transCtx *rsmTransformContext } +var _ graph.Plan = &Plan{} + func init() { model.AddScheme(workloads.AddToScheme) } // PlanBuilder implementation -func (b *csSetPlanBuilder) Init() error { - csSet := &workloads.ConsensusSet{} - if err := b.cli.Get(b.transCtx.Context, b.req.NamespacedName, csSet); err != nil { +func (b *PlanBuilder) Init() error { + rsm := &workloads.ReplicatedStateMachine{} + if err := b.cli.Get(b.transCtx.Context, b.req.NamespacedName, rsm); err != nil { return err } - b.AddTransformer(&initTransformer{ConsensusSet: csSet}) + b.AddTransformer(&initTransformer{ReplicatedStateMachine: rsm}) return nil } -func (b *csSetPlanBuilder) AddTransformer(transformer ...graph.Transformer) graph.PlanBuilder { +func (b *PlanBuilder) AddTransformer(transformer ...graph.Transformer) graph.PlanBuilder { b.transformers = append(b.transformers, transformer...) return b } -func (b *csSetPlanBuilder) AddParallelTransformer(transformer ...graph.Transformer) graph.PlanBuilder { +func (b *PlanBuilder) AddParallelTransformer(transformer ...graph.Transformer) graph.PlanBuilder { b.transformers = append(b.transformers, &model.ParallelTransformer{Transformers: transformer}) return b } -func (b *csSetPlanBuilder) Build() (graph.Plan, error) { +func (b *PlanBuilder) Build() (graph.Plan, error) { var err error // new a DAG and apply chain on it, after that we should get the final Plan dag := graph.NewDAG() @@ -84,9 +88,9 @@ func (b *csSetPlanBuilder) Build() (graph.Plan, error) { b.transCtx.Logger.Info(fmt.Sprintf("DAG: %s", dag)) // we got the execution plan - plan := &csSetPlan{ + plan := &Plan{ dag: dag, - walkFunc: b.csSetWalkFunc, + walkFunc: b.rsmWalkFunc, cli: b.cli, transCtx: b.transCtx, } @@ -95,13 +99,13 @@ func (b *csSetPlanBuilder) Build() (graph.Plan, error) { // Plan implementation -func (p *csSetPlan) Execute() error { +func (p *Plan) Execute() error { return p.dag.WalkReverseTopoOrder(p.walkFunc) } // Do the real works -func (b *csSetPlanBuilder) csSetWalkFunc(v graph.Vertex) error { +func (b *PlanBuilder) rsmWalkFunc(v graph.Vertex) error { vertex, ok := v.(*model.ObjectVertex) if !ok { return fmt.Errorf("wrong vertex type %v", v) @@ -129,7 +133,7 @@ func (b *csSetPlanBuilder) csSetWalkFunc(v graph.Vertex) error { return err } case model.DELETE: - if controllerutil.RemoveFinalizer(vertex.Obj, csSetFinalizerName) { + if controllerutil.RemoveFinalizer(vertex.Obj, rsmFinalizerName) { err := b.cli.Update(b.transCtx.Context, vertex.Obj) if err != nil && !apierrors.IsNotFound(err) { b.transCtx.Logger.Error(err, fmt.Sprintf("delete %T error: %s", vertex.Obj, vertex.Obj.GetName())) @@ -151,7 +155,7 @@ func 
(b *csSetPlanBuilder) csSetWalkFunc(v graph.Vertex) error { return nil } -func (b *csSetPlanBuilder) buildUpdateObj(vertex *model.ObjectVertex) (client.Object, error) { +func (b *PlanBuilder) buildUpdateObj(vertex *model.ObjectVertex) (client.Object, error) { handleSts := func(origObj, targetObj *appsv1.StatefulSet) (client.Object, error) { origObj.Spec.Template = targetObj.Spec.Template origObj.Spec.Replicas = targetObj.Spec.Replicas @@ -159,11 +163,6 @@ func (b *csSetPlanBuilder) buildUpdateObj(vertex *model.ObjectVertex) (client.Ob return origObj, nil } - handleDeploy := func(origObj, targetObj *appsv1.Deployment) (client.Object, error) { - origObj.Spec = targetObj.Spec - return origObj, nil - } - handleSvc := func(origObj, targetObj *corev1.Service) (client.Object, error) { origObj.Spec = targetObj.Spec return origObj, nil @@ -173,6 +172,12 @@ func (b *csSetPlanBuilder) buildUpdateObj(vertex *model.ObjectVertex) (client.Ob if origObj.Spec.Resources.Requests[corev1.ResourceStorage] == targetObj.Spec.Resources.Requests[corev1.ResourceStorage] { return origObj, nil } + if targetObj.Spec.Resources.Requests == nil { + return origObj, nil + } + if origObj.Spec.Resources.Requests == nil { + origObj.Spec.Resources.Requests = corev1.ResourceList{} + } origObj.Spec.Resources.Requests[corev1.ResourceStorage] = targetObj.Spec.Resources.Requests[corev1.ResourceStorage] return origObj, nil } @@ -181,8 +186,6 @@ func (b *csSetPlanBuilder) buildUpdateObj(vertex *model.ObjectVertex) (client.Ob switch v := vertex.Obj.(type) { case *appsv1.StatefulSet: return handleSts(origObj.(*appsv1.StatefulSet), v) - case *appsv1.Deployment: - return handleDeploy(origObj.(*appsv1.Deployment), v) case *corev1.Service: return handleSvc(origObj.(*corev1.Service), v) case *corev1.PersistentVolumeClaim: @@ -194,12 +197,12 @@ func (b *csSetPlanBuilder) buildUpdateObj(vertex *model.ObjectVertex) (client.Ob return vertex.Obj, nil } -// NewCSSetPlanBuilder returns a csSetPlanBuilder powered PlanBuilder -func NewCSSetPlanBuilder(ctx intctrlutil.RequestCtx, cli client.Client, req ctrl.Request) graph.PlanBuilder { - return &csSetPlanBuilder{ +// NewRSMPlanBuilder returns a RSMPlanBuilder powered PlanBuilder +func NewRSMPlanBuilder(ctx intctrlutil.RequestCtx, cli client.Client, req ctrl.Request) graph.PlanBuilder { + return &PlanBuilder{ req: req, cli: cli, - transCtx: &CSSetTransformContext{ + transCtx: &rsmTransformContext{ Context: ctx.Ctx, Client: cli, EventRecorder: ctx.Recorder, @@ -207,6 +210,3 @@ func NewCSSetPlanBuilder(ctx intctrlutil.RequestCtx, cli client.Client, req ctrl }, } } - -var _ graph.PlanBuilder = &csSetPlanBuilder{} -var _ graph.Plan = &csSetPlan{} diff --git a/internal/controller/rsm/plan_builder_test.go b/internal/controller/rsm/plan_builder_test.go new file mode 100644 index 00000000000..fd00846e3f0 --- /dev/null +++ b/internal/controller/rsm/plan_builder_test.go @@ -0,0 +1,215 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/model" + intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + mockclient "github.com/apecloud/kubeblocks/internal/testutil/k8s/mocks" + "github.com/golang/mock/gomock" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("plan builder test", func() { + Context("rsmWalkFunc function", func() { + var rsmBuilder *PlanBuilder + + BeforeEach(func() { + cli := k8sMock + reqCtx := intctrlutil.RequestCtx{ + Ctx: ctx, + Log: logger, + } + req := ctrl.Request{} + planBuilder := NewRSMPlanBuilder(reqCtx, cli, req) + rsmBuilder, _ = planBuilder.(*PlanBuilder) + + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + AddFinalizers([]string{rsmFinalizerName}). + GetObject() + }) + + It("should create object", func() { + v := &model.ObjectVertex{ + Obj: rsm, + Action: model.ActionPtr(model.CREATE), + } + k8sMock.EXPECT(). + Create(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.CreateOption) error { + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Namespace).Should(Equal(rsm.Namespace)) + Expect(obj.Name).Should(Equal(rsm.Name)) + Expect(obj.Finalizers).Should(Equal(rsm.Finalizers)) + return nil + }).Times(1) + Expect(rsmBuilder.rsmWalkFunc(v)).Should(Succeed()) + }) + + It("should update sts object", func() { + stsOrig := builder.NewStatefulSetBuilder(namespace, name).SetReplicas(3).GetObject() + sts := stsOrig.DeepCopy() + replicas := int32(5) + sts.Spec.Replicas = &replicas + v := &model.ObjectVertex{ + OriObj: stsOrig, + Obj: sts, + Action: model.ActionPtr(model.UPDATE), + } + k8sMock.EXPECT(). + Update(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, obj *apps.StatefulSet, _ ...client.UpdateOption) error { + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Namespace).Should(Equal(sts.Namespace)) + Expect(obj.Name).Should(Equal(sts.Name)) + Expect(obj.Spec.Replicas).Should(Equal(sts.Spec.Replicas)) + Expect(obj.Spec.Template).Should(Equal(sts.Spec.Template)) + Expect(obj.Spec.UpdateStrategy).Should(Equal(sts.Spec.UpdateStrategy)) + return nil + }).Times(1) + Expect(rsmBuilder.rsmWalkFunc(v)).Should(Succeed()) + }) + + It("should update svc object", func() { + svcOrig := builder.NewServiceBuilder(namespace, name).SetType(corev1.ServiceTypeLoadBalancer).GetObject() + svc := svcOrig.DeepCopy() + svc.Spec.Selector = map[string]string{"foo": "bar"} + v := &model.ObjectVertex{ + OriObj: svcOrig, + Obj: svc, + Action: model.ActionPtr(model.UPDATE), + } + k8sMock.EXPECT(). + Update(gomock.Any(), gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(_ context.Context, obj *corev1.Service, _ ...client.UpdateOption) error { + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Namespace).Should(Equal(svc.Namespace)) + Expect(obj.Name).Should(Equal(svc.Name)) + Expect(obj.Spec).Should(Equal(svc.Spec)) + return nil + }).Times(1) + Expect(rsmBuilder.rsmWalkFunc(v)).Should(Succeed()) + }) + + It("should update pvc object", func() { + pvcOrig := builder.NewPVCBuilder(namespace, name).GetObject() + pvc := pvcOrig.DeepCopy() + pvc.Spec.Resources = corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse("500m"), + }, + } + v := &model.ObjectVertex{ + OriObj: pvcOrig, + Obj: pvc, + Action: model.ActionPtr(model.UPDATE), + } + k8sMock.EXPECT(). + Update(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, obj *corev1.PersistentVolumeClaim, _ ...client.UpdateOption) error { + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Namespace).Should(Equal(pvc.Namespace)) + Expect(obj.Name).Should(Equal(pvc.Name)) + Expect(obj.Spec.Resources).Should(Equal(pvc.Spec.Resources)) + return nil + }).Times(1) + Expect(rsmBuilder.rsmWalkFunc(v)).Should(Succeed()) + }) + + It("should delete object", func() { + v := &model.ObjectVertex{ + Obj: rsm, + Action: model.ActionPtr(model.DELETE), + } + k8sMock.EXPECT(). + Update(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.UpdateOption) error { + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Finalizers).Should(HaveLen(0)) + return nil + }).Times(1) + k8sMock.EXPECT(). + Delete(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.DeleteOption) error { + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Namespace).Should(Equal(rsm.Namespace)) + Expect(obj.Name).Should(Equal(rsm.Name)) + Expect(obj.Finalizers).Should(HaveLen(0)) + return nil + }).Times(1) + Expect(rsmBuilder.rsmWalkFunc(v)).Should(Succeed()) + }) + + It("should update object status", func() { + rsm.Generation = 2 + rsm.Status.ObservedGeneration = 2 + rsmOrig := rsm.DeepCopy() + rsmOrig.Status.ObservedGeneration = 1 + + v := &model.ObjectVertex{ + Obj: rsm, + OriObj: rsmOrig, + Action: model.ActionPtr(model.STATUS), + } + ct := gomock.NewController(GinkgoT()) + statusWriter := mockclient.NewMockStatusWriter(ct) + + gomock.InOrder( + k8sMock.EXPECT().Status().Return(statusWriter), + statusWriter.EXPECT(). + Patch(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, patch client.Patch, _ ...client.PatchOption) error { + Expect(obj).ShouldNot(BeNil()) + Expect(obj.Namespace).Should(Equal(rsm.Namespace)) + Expect(obj.Name).Should(Equal(rsm.Name)) + Expect(obj.Status.ObservedGeneration).Should(Equal(rsm.Status.ObservedGeneration)) + return nil + }).Times(1), + ) + Expect(rsmBuilder.rsmWalkFunc(v)).Should(Succeed()) + }) + + It("should return error if no action set", func() { + v := &model.ObjectVertex{} + err := rsmBuilder.rsmWalkFunc(v) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(ContainSubstring("vertex action can't be nil")) + }) + + It("should return nil and do nothing if immutable=true", func() { + v := &model.ObjectVertex{ + Action: model.ActionPtr(model.UPDATE), + Immutable: true, + } + Expect(rsmBuilder.rsmWalkFunc(v)).Should(Succeed()) + }) + }) +}) diff --git a/internal/controller/consensusset/pod_role_event_handler.go b/internal/controller/rsm/pod_role_event_handler.go similarity index 94% rename from internal/controller/consensusset/pod_role_event_handler.go rename to internal/controller/rsm/pod_role_event_handler.go index 10af81e284b..5d770983d47 100644 --- a/internal/controller/consensusset/pod_role_event_handler.go +++ b/internal/controller/rsm/pod_role_event_handler.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package rsm import ( "encoding/json" @@ -61,6 +61,8 @@ const ( probeEventRoleInvalid probeEventType = "roleInvalid" ) +var roleMessageRegex = regexp.MustCompile(`Readiness probe failed: .*({.*})`) + func (h *PodRoleEventHandler) Handle(cli client.Client, reqCtx intctrlutil.RequestCtx, recorder record.EventRecorder, event *corev1.Event) error { if event.InvolvedObject.FieldPath != roleObservationEventFieldPath { return nil @@ -118,20 +120,20 @@ func handleRoleChangedEvent(cli client.Client, reqCtx intctrlutil.RequestCtx, re return role, nil } name := pod.Labels[constant.AppInstanceLabelKey] - csSet := &workloads.ConsensusSet{} - if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: pod.Namespace, Name: name}, csSet); err != nil { + rsm := &workloads.ReplicatedStateMachine{} + if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: pod.Namespace, Name: name}, rsm); err != nil { return "", err } reqCtx.Log.V(1).Info("handle role change event", "pod", pod.Name, "role", role, "originalRole", message.OriginalRole) - return role, updatePodRoleLabel(cli, reqCtx, *csSet, pod, role) + return role, updatePodRoleLabel(cli, reqCtx, *rsm, pod, role) } // parseProbeEventMessage parses probe event message. 
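// A typical role-changed event message looks like:
//
//	Readiness probe failed: error: health rpc failed: rpc error: code = Unknown desc = {"event":"Success","originalRole":"","role":"leader"}
//
// roleMessageRegex captures the trailing JSON object, which is then unmarshalled
// into probeMessage. A minimal sketch of that extraction, with the field tags
// inferred from the test messages below (treat them as assumptions):
//
//	matches := roleMessageRegex.FindStringSubmatch(event.Message)
//	if len(matches) == 2 {
//		var parsed struct {
//			Event        string `json:"event"`
//			OriginalRole string `json:"originalRole"`
//			Role         string `json:"role"`
//		}
//		_ = json.Unmarshal([]byte(matches[1]), &parsed) // parsed.Role == "leader"
//	}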
func parseProbeEventMessage(reqCtx intctrlutil.RequestCtx, event *corev1.Event) *probeMessage {
 	message := &probeMessage{}
-	re := regexp.MustCompile(`Readiness probe failed: ({.*})`)
-	matches := re.FindStringSubmatch(event.Message)
+
+	matches := roleMessageRegex.FindStringSubmatch(event.Message)
 	if len(matches) != 2 {
 		reqCtx.Log.Info("failed to parse Readiness probe event message", "message", event.Message)
 		return nil
diff --git a/internal/controller/rsm/pod_role_event_handler_test.go b/internal/controller/rsm/pod_role_event_handler_test.go
new file mode 100644
index 00000000000..9b6c90f318b
--- /dev/null
+++ b/internal/controller/rsm/pod_role_event_handler_test.go
@@ -0,0 +1,149 @@
+/*
+Copyright (C) 2022-2023 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package rsm
+
+import (
+	"context"
+	"fmt"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/golang/mock/gomock"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+
+	workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
+	"github.com/apecloud/kubeblocks/internal/constant"
+	"github.com/apecloud/kubeblocks/internal/controller/builder"
+	intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil"
+)
+
+var _ = Describe("pod role label event handler test", func() {
+	Context("Handle function", func() {
+		It("should work well", func() {
+			cli := k8sMock
+			reqCtx := intctrlutil.RequestCtx{
+				Ctx: ctx,
+				Log: logger,
+			}
+			pod := builder.NewPodBuilder(namespace, getPodName(name, 0)).SetUID(uid).GetObject()
+			objectRef := corev1.ObjectReference{
+				APIVersion: "v1",
+				Kind:       "Pod",
+				Namespace:  pod.Namespace,
+				Name:       pod.Name,
+				UID:        pod.UID,
+				FieldPath:  roleObservationEventFieldPath,
+			}
+			role := workloads.ReplicaRole{
+				Name:       "leader",
+				AccessMode: workloads.ReadWriteMode,
+				IsLeader:   true,
+				CanVote:    true,
+			}
+
+			By("build an expected message")
+			message := fmt.Sprintf("Readiness probe failed: error: health rpc failed: rpc error: code = Unknown desc = {\"event\":\"Success\",\"originalRole\":\"\",\"role\":\"%s\"}", role.Name)
+			event := builder.NewEventBuilder(namespace, "foo").
+				SetInvolvedObject(objectRef).
+				SetMessage(message).
+				GetObject()
+
+			handler := &PodRoleEventHandler{}
+			k8sMock.EXPECT().
+				Get(gomock.Any(), gomock.Any(), &corev1.Pod{}, gomock.Any()).
+				DoAndReturn(func(_ context.Context, objKey client.ObjectKey, p *corev1.Pod, _ ...client.GetOptions) error {
+					p.Namespace = objKey.Namespace
+					p.Name = objKey.Name
+					p.UID = pod.UID
+					p.Labels = map[string]string{constant.AppInstanceLabelKey: name}
+					return nil
+				}).Times(1)
+			k8sMock.EXPECT().
+				Get(gomock.Any(), gomock.Any(), &workloads.ReplicatedStateMachine{}, gomock.Any()).
+				DoAndReturn(func(_ context.Context, objKey client.ObjectKey, rsm *workloads.ReplicatedStateMachine, _ ...client.GetOptions) error {
+					rsm.Namespace = objKey.Namespace
+					rsm.Name = objKey.Name
+					rsm.Spec.Roles = []workloads.ReplicaRole{role}
+					return nil
+				}).Times(1)
+			k8sMock.EXPECT().
+				Patch(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
+				DoAndReturn(func(_ context.Context, pd *corev1.Pod, patch client.Patch, _ ...client.PatchOption) error {
+					Expect(pd).ShouldNot(BeNil())
+					Expect(pd.Labels).ShouldNot(BeNil())
+					Expect(pd.Labels[roleLabelKey]).Should(Equal(role.Name))
+					Expect(pd.Labels[rsmAccessModeLabelKey]).Should(BeEquivalentTo(role.AccessMode))
+					return nil
+				}).Times(1)
+			k8sMock.EXPECT().
+				Patch(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
+				DoAndReturn(func(_ context.Context, evt *corev1.Event, patch client.Patch, _ ...client.PatchOption) error {
+					Expect(evt).ShouldNot(BeNil())
+					Expect(evt.Annotations).ShouldNot(BeNil())
+					Expect(evt.Annotations[roleChangedAnnotKey]).Should(Equal(fmt.Sprintf("count-%d", evt.Count)))
+					return nil
+				}).Times(1)
+			Expect(handler.Handle(cli, reqCtx, nil, event)).Should(Succeed())
+
+			By("build an unexpected message")
+			message = "unexpected message"
+			event = builder.NewEventBuilder(namespace, "foo").
+				SetInvolvedObject(objectRef).
+				SetMessage(message).
+				GetObject()
+			k8sMock.EXPECT().
+				Patch(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
+				DoAndReturn(func(_ context.Context, evt *corev1.Event, patch client.Patch, _ ...client.PatchOption) error {
+					Expect(evt).ShouldNot(BeNil())
+					Expect(evt.Annotations).ShouldNot(BeNil())
+					Expect(evt.Annotations[roleChangedAnnotKey]).Should(Equal(fmt.Sprintf("count-%d", evt.Count)))
+					return nil
+				}).Times(1)
+			Expect(handler.Handle(cli, reqCtx, nil, event)).Should(Succeed())
+		})
+	})
+
+	Context("parseProbeEventMessage function", func() {
+		It("should work well", func() {
+			reqCtx := intctrlutil.RequestCtx{
+				Ctx: ctx,
+				Log: logf.FromContext(ctx).WithValues("pod-role-event-handler", namespace),
+			}
+
+			By("build a well-formatted message")
+			roleName := "leader"
+			message := fmt.Sprintf("Readiness probe failed: error: health rpc failed: rpc error: code = Unknown desc = {\"event\":\"Success\",\"originalRole\":\"\",\"role\":\"%s\"}", roleName)
+			event := builder.NewEventBuilder(namespace, "foo").
+				SetMessage(message).
+				GetObject()
+			msg := parseProbeEventMessage(reqCtx, event)
+			Expect(msg).ShouldNot(BeNil())
+			Expect(msg.Role).Should(Equal(roleName))
+
+			By("build a malformed message")
+			message = "Readiness probe failed: error: health rpc failed: rpc error: code = Unknown desc = {\"event\":}"
+			event.Message = message
+			Expect(parseProbeEventMessage(reqCtx, event)).Should(BeNil())
+		})
+	})
+})
diff --git a/internal/controller/rsm/suite_test.go b/internal/controller/rsm/suite_test.go
new file mode 100644
index 00000000000..ad450272baf
--- /dev/null
+++ b/internal/controller/rsm/suite_test.go
@@ -0,0 +1,228 @@
+/*
+Copyright (C) 2022-2023 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/go-logr/logr" + "github.com/golang/mock/gomock" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/graph" + "github.com/apecloud/kubeblocks/internal/controller/model" + testutil "github.com/apecloud/kubeblocks/internal/testutil/k8s" + "github.com/apecloud/kubeblocks/internal/testutil/k8s/mocks" +) + +var ( + controller *gomock.Controller + k8sMock *mocks.MockClient + ctx context.Context + logger logr.Logger + transCtx *rsmTransformContext + dag *graph.DAG + transformer graph.Transformer +) + +const ( + namespace = "foo" + name = "bar" + oldRevision = "old-revision" + newRevision = "new-revision" +) + +var ( + uid = types.UID("rsm-mock-uid") + + roles = []workloads.ReplicaRole{ + { + Name: "leader", + IsLeader: true, + CanVote: true, + AccessMode: workloads.ReadWriteMode, + }, + { + Name: "follower", + IsLeader: false, + CanVote: true, + AccessMode: workloads.ReadonlyMode, + }, + { + Name: "logger", + IsLeader: false, + CanVote: true, + AccessMode: workloads.NoneMode, + }, + { + Name: "learner", + IsLeader: false, + CanVote: false, + AccessMode: workloads.ReadonlyMode, + }, + } + + reconfiguration = workloads.MembershipReconfiguration{ + SwitchoverAction: &workloads.Action{Command: []string{"cmd"}}, + MemberJoinAction: &workloads.Action{Command: []string{"cmd"}}, + MemberLeaveAction: &workloads.Action{Command: []string{"cmd"}}, + } + + service = corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "svc", + Protocol: corev1.ProtocolTCP, + Port: 12345, + TargetPort: intstr.FromString("my-svc"), + }, + }, + } + + credential = workloads.Credential{ + Username: workloads.CredentialVar{Value: "foo"}, + Password: workloads.CredentialVar{Value: "bar"}, + } + + pod = builder.NewPodBuilder(namespace, getPodName(name, 0)). 
+		AddContainer(corev1.Container{
+			Name:  "foo",
+			Image: "bar",
+			Ports: []corev1.ContainerPort{
+				{
+					Name:          "my-svc",
+					Protocol:      corev1.ProtocolTCP,
+					ContainerPort: 12345,
+				},
+			},
+		}).GetObject()
+	template = corev1.PodTemplateSpec{
+		ObjectMeta: pod.ObjectMeta,
+		Spec:       pod.Spec,
+	}
+
+	observeActions = []workloads.Action{{Command: []string{"cmd"}}}
+
+	rsm *workloads.ReplicatedStateMachine
+)
+
+func kindPriority(o client.Object) int {
+	switch o.(type) {
+	case nil:
+		return 0
+	case *workloads.ReplicatedStateMachine:
+		return 1
+	case *apps.StatefulSet:
+		return 2
+	case *corev1.Service:
+		return 3
+	case *corev1.ConfigMap:
+		return 4
+	default:
+		return 5
+	}
+}
+
+func less(v1, v2 graph.Vertex) bool {
+	o1, _ := v1.(*model.ObjectVertex)
+	o2, _ := v2.(*model.ObjectVertex)
+	switch {
+	case o1.Immutable != o2.Immutable:
+		return false
+	case o1.Action == nil && o2.Action == nil:
+	case o1.Action == nil, o2.Action == nil:
+		// exactly one of the two actions is nil here, so the vertices can't be equal
+		return false
+	case *o1.Action != *o2.Action:
+		return false
+	}
+	p1 := kindPriority(o1.Obj)
+	p2 := kindPriority(o2.Obj)
+	if p1 == p2 {
+		// TODO(free6om): compare each field of same kind
+		return o1.Obj.GetName() < o2.Obj.GetName()
+	}
+	return p1 < p2
+}
+
+func makePodUpdateReady(newRevision string, pods ...*corev1.Pod) {
+	readyCondition := corev1.PodCondition{
+		Type:   corev1.PodReady,
+		Status: corev1.ConditionTrue,
+	}
+	for _, pod := range pods {
+		pod.Labels[apps.StatefulSetRevisionLabel] = newRevision
+		if pod.Labels[roleLabelKey] == "" {
+			pod.Labels[roleLabelKey] = "learner"
+		}
+		pod.Status.Conditions = append(pod.Status.Conditions, readyCondition)
+	}
+}
+
+func mockUnderlyingSts(rsm workloads.ReplicatedStateMachine, generation int64) *apps.StatefulSet {
+	headLessSvc := buildHeadlessSvc(rsm)
+	envConfig := buildEnvConfigMap(rsm)
+	sts := buildSts(rsm, headLessSvc.Name, *envConfig)
+	sts.Generation = generation
+	sts.Status.ObservedGeneration = generation
+	sts.Status.Replicas = *sts.Spec.Replicas
+	sts.Status.ReadyReplicas = sts.Status.Replicas
+	sts.Status.AvailableReplicas = sts.Status.ReadyReplicas
+	return sts
+}
+
+func mockDAG() *graph.DAG {
+	d := graph.NewDAG()
+	model.PrepareStatus(d, transCtx.rsmOrig, transCtx.rsm)
+	return d
+}
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+func TestAPIs(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "ReplicatedStateMachine Suite")
+}
+
+var _ = BeforeSuite(func() {
+	controller, k8sMock = testutil.SetupK8sMock()
+	ctx = context.Background()
+	logger = logf.FromContext(ctx).WithValues("rsm-test", namespace)
+
+	go func() {
+		defer GinkgoRecover()
+	}()
+})
+
+var _ = AfterSuite(func() {
+	controller.Finish()
+})
diff --git a/internal/controller/consensusset/transformer_deletion.go b/internal/controller/rsm/transformer_deletion.go
similarity index 76%
rename from internal/controller/consensusset/transformer_deletion.go
rename to internal/controller/rsm/transformer_deletion.go
index 220a6d72b4a..baac1862389 100644
--- a/internal/controller/consensusset/transformer_deletion.go
+++ b/internal/controller/rsm/transformer_deletion.go
@@ -17,21 +17,24 @@ You should have received a copy of the GNU Affero General Public License
 along with this program. If not, see <http://www.gnu.org/licenses/>.
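The deletion transformer that follows finds the secondary objects it owns by label rather than by walking owner references. A minimal sketch of that listing idiom, assuming a controller-runtime client cli, a context.Context ctx, and the apps alias for k8s.io/api/apps/v1 used throughout this package:

	ml := client.MatchingLabels{constant.AppInstanceLabelKey: rsm.Name}
	var stsList apps.StatefulSetList
	if err := cli.List(ctx, &stsList, client.InNamespace(rsm.Namespace), ml); err != nil {
		return err
	}
	// every object found this way becomes a delete vertex on the DAG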
*/ -package consensusset +package rsm import ( "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/apecloud/kubeblocks/internal/constant" "github.com/apecloud/kubeblocks/internal/controller/graph" "github.com/apecloud/kubeblocks/internal/controller/model" ) -// CSSetDeletionTransformer handles ConsensusSet deletion -type CSSetDeletionTransformer struct{} +// ObjectDeletionTransformer handles object and its secondary resources' deletion +type ObjectDeletionTransformer struct{} -func (t *CSSetDeletionTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - transCtx, _ := ctx.(*CSSetTransformContext) - obj := transCtx.CSSet +var _ graph.Transformer = &ObjectDeletionTransformer{} + +func (t *ObjectDeletionTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { + transCtx, _ := ctx.(*rsmTransformContext) + obj := transCtx.rsm if !model.IsObjectDeleting(obj) { return nil } @@ -40,7 +43,7 @@ func (t *CSSetDeletionTransformer) Transform(ctx graph.TransformContext, dag *gr // there is chance that objects leak occurs because of cache stale // ignore the problem currently // TODO: GC the leaked objects - ml := client.MatchingLabels{model.AppInstanceLabelKey: obj.Name} + ml := client.MatchingLabels{constant.AppInstanceLabelKey: obj.Name} snapshot, err := model.ReadCacheSnapshot(transCtx, obj, ml, deletionKinds()...) if err != nil { return err @@ -56,5 +59,3 @@ func (t *CSSetDeletionTransformer) Transform(ctx graph.TransformContext, dag *gr // fast return, that is stopping the plan.Build() stage and jump to plan.Execute() directly return graph.ErrPrematureStop } - -var _ graph.Transformer = &CSSetDeletionTransformer{} diff --git a/internal/controller/rsm/transformer_deletion_test.go b/internal/controller/rsm/transformer_deletion_test.go new file mode 100644 index 00000000000..b7d3ca5bfb8 --- /dev/null +++ b/internal/controller/rsm/transformer_deletion_test.go @@ -0,0 +1,128 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/golang/mock/gomock" + apps "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/graph" + "github.com/apecloud/kubeblocks/internal/controller/model" +) + +var _ = Describe("object deletion transformer test.", func() { + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + SetUID(uid). + SetReplicas(3). + SetRoles(roles). + SetMembershipReconfiguration(reconfiguration). + SetService(service). 
+ GetObject() + + transCtx = &rsmTransformContext{ + Context: ctx, + Client: k8sMock, + EventRecorder: nil, + Logger: logger, + rsmOrig: rsm.DeepCopy(), + rsm: rsm, + } + + dag = mockDAG() + transformer = &ObjectDeletionTransformer{} + }) + + Context("rsm deletion", func() { + It("should work well", func() { + ts := metav1.NewTime(time.Now()) + transCtx.rsmOrig.DeletionTimestamp = &ts + transCtx.rsm.DeletionTimestamp = &ts + sts := mockUnderlyingSts(*rsm, rsm.Generation) + headLessSvc := buildHeadlessSvc(*rsm) + envConfig := buildEnvConfigMap(*rsm) + actionName := getActionName(rsm.Name, int(rsm.Generation), 1, jobTypeSwitchover) + action := builder.NewJobBuilder(name, actionName). + AddLabelsInMap(map[string]string{ + constant.AppInstanceLabelKey: rsm.Name, + constant.KBManagedByKey: kindReplicatedStateMachine, + jobScenarioLabel: jobScenarioMembership, + jobTypeLabel: jobTypeSwitchover, + jobHandledLabel: jobHandledFalse, + }). + SetSuspend(false). + GetObject() + k8sMock.EXPECT(). + List(gomock.Any(), &apps.StatefulSetList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *apps.StatefulSetList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []apps.StatefulSet{*sts} + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.ServiceList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.ServiceList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []corev1.Service{*headLessSvc} + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.ConfigMapList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.ConfigMapList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []corev1.ConfigMap{*envConfig} + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &batchv1.JobList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *batchv1.JobList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []batchv1.Job{*action} + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + return nil + }).Times(3) + + Expect(transformer.Transform(transCtx, dag)).Should(Equal(graph.ErrPrematureStop)) + dagExpected := mockDAG() + model.PrepareDelete(dagExpected, action) + model.PrepareDelete(dagExpected, envConfig) + model.PrepareDelete(dagExpected, headLessSvc) + model.PrepareDelete(dagExpected, sts) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controller/consensusset/transformer_fix_meta.go b/internal/controller/rsm/transformer_fix_meta.go similarity index 85% rename from internal/controller/consensusset/transformer_fix_meta.go rename to internal/controller/rsm/transformer_fix_meta.go index 48df967f2b5..4ef24dc35fc 100644 --- a/internal/controller/consensusset/transformer_fix_meta.go +++ b/internal/controller/rsm/transformer_fix_meta.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
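FixMetaTransformer, shown next, is the usual add-finalizer-first step. A minimal sketch of the guard it implements, with obj standing in for the ReplicatedStateMachine and controllerutil coming from sigs.k8s.io/controller-runtime:

	if !controllerutil.ContainsFinalizer(obj, rsmFinalizerName) {
		controllerutil.AddFinalizer(obj, rsmFinalizerName)
		// the update is queued on the DAG root and plan building stops early,
		// so the finalizer is persisted before any other spec change is attempted
	}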
*/
 
-package consensusset
+package rsm
 
 import (
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -28,25 +28,25 @@ import (
 
 type FixMetaTransformer struct{}
 
+var _ graph.Transformer = &FixMetaTransformer{}
+
 func (t *FixMetaTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error {
-	transCtx, _ := ctx.(*CSSetTransformContext)
-	csSet := transCtx.CSSet
-	if model.IsObjectDeleting(csSet) {
+	transCtx, _ := ctx.(*rsmTransformContext)
+	obj := transCtx.rsm
+	if model.IsObjectDeleting(obj) {
 		return nil
 	}
 
 	// The object is not being deleted, so if it does not have our finalizer,
 	// then let's add the finalizer and update the object. This is equivalent to
 	// registering our finalizer.
-	if controllerutil.ContainsFinalizer(csSet, csSetFinalizerName) {
+	if controllerutil.ContainsFinalizer(obj, rsmFinalizerName) {
 		return nil
 	}
-	controllerutil.AddFinalizer(csSet, csSetFinalizerName)
+	controllerutil.AddFinalizer(obj, rsmFinalizerName)
 	if err := model.PrepareRootUpdate(dag); err != nil {
 		return err
 	}
 
 	return graph.ErrPrematureStop
 }
-
-var _ graph.Transformer = &FixMetaTransformer{}
diff --git a/internal/controller/rsm/transformer_fix_meta_test.go b/internal/controller/rsm/transformer_fix_meta_test.go
new file mode 100644
index 00000000000..1d2e8dc8b66
--- /dev/null
+++ b/internal/controller/rsm/transformer_fix_meta_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright (C) 2022-2023 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package rsm
+
+import (
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
+	"github.com/apecloud/kubeblocks/internal/controller/builder"
+	"github.com/apecloud/kubeblocks/internal/controller/graph"
+	"github.com/apecloud/kubeblocks/internal/controller/model"
+	"k8s.io/utils/strings/slices"
+)
+
+var _ = Describe("fix meta transformer test.", func() {
+	BeforeEach(func() {
+		rsm = builder.NewReplicatedStateMachineBuilder(namespace, name).
+			SetUID(uid).
+			SetReplicas(3).
+ GetObject() + + transCtx = &rsmTransformContext{ + Context: ctx, + Client: k8sMock, + EventRecorder: nil, + Logger: logger, + rsmOrig: rsm.DeepCopy(), + rsm: rsm, + } + + dag = mockDAG() + transformer = &FixMetaTransformer{} + }) + + Context("fix meta", func() { + It("should work well", func() { + Expect(transformer.Transform(transCtx, dag)).Should(Equal(graph.ErrPrematureStop)) + dagExpected := graph.NewDAG() + root := &model.ObjectVertex{ + Obj: transCtx.rsm, + OriObj: transCtx.rsmOrig, + Action: model.ActionPtr(model.UPDATE), + } + dagExpected.AddVertex(root) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + root, err := model.FindRootVertex(dag) + Expect(err).Should(BeNil()) + rsmNew, ok := root.Obj.(*workloads.ReplicatedStateMachine) + Expect(ok).Should(BeTrue()) + Expect(rsmNew.Finalizers).ShouldNot(BeNil()) + Expect(slices.Contains(rsmNew.Finalizers, rsmFinalizerName)).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controller/consensusset/transformer_init.go b/internal/controller/rsm/transformer_init.go similarity index 82% rename from internal/controller/consensusset/transformer_init.go rename to internal/controller/rsm/transformer_init.go index 8bf93b1ae5c..c7ac68c773c 100644 --- a/internal/controller/consensusset/transformer_init.go +++ b/internal/controller/rsm/transformer_init.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package rsm import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" @@ -26,17 +26,17 @@ import ( ) type initTransformer struct { - *workloads.ConsensusSet + *workloads.ReplicatedStateMachine } +var _ graph.Transformer = &initTransformer{} + func (t *initTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { // init context - transCtx, _ := ctx.(*CSSetTransformContext) - transCtx.CSSet, transCtx.OrigCSSet = t.ConsensusSet, t.ConsensusSet.DeepCopy() + transCtx, _ := ctx.(*rsmTransformContext) + transCtx.rsm, transCtx.rsmOrig = t.ReplicatedStateMachine, t.ReplicatedStateMachine.DeepCopy() // init dag - model.PrepareStatus(dag, transCtx.OrigCSSet, transCtx.CSSet) + model.PrepareStatus(dag, transCtx.rsmOrig, transCtx.rsm) return nil } - -var _ graph.Transformer = &initTransformer{} diff --git a/internal/controller/rsm/transformer_init_test.go b/internal/controller/rsm/transformer_init_test.go new file mode 100644 index 00000000000..4c7bab9cdcb --- /dev/null +++ b/internal/controller/rsm/transformer_init_test.go @@ -0,0 +1,64 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/graph" + "github.com/apecloud/kubeblocks/internal/controller/model" +) + +var _ = Describe("init transformer test.", func() { + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + SetUID(uid). + SetReplicas(3). + GetObject() + + transCtx = &rsmTransformContext{ + Context: ctx, + Client: k8sMock, + EventRecorder: nil, + Logger: logger, + rsmOrig: rsm.DeepCopy(), + rsm: rsm, + } + + dag = graph.NewDAG() + transformer = &initTransformer{} + }) + + Context("dag init", func() { + It("should work well", func() { + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + dagExpected := graph.NewDAG() + root := &model.ObjectVertex{ + Obj: transCtx.rsm, + OriObj: transCtx.rsmOrig, + Action: model.ActionPtr(model.STATUS), + } + dagExpected.AddVertex(root) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controller/consensusset/transformer_member_reconfiguration.go b/internal/controller/rsm/transformer_member_reconfiguration.go similarity index 63% rename from internal/controller/consensusset/transformer_member_reconfiguration.go rename to internal/controller/rsm/transformer_member_reconfiguration.go index ef8fd25646e..684e18f95d6 100644 --- a/internal/controller/consensusset/transformer_member_reconfiguration.go +++ b/internal/controller/rsm/transformer_member_reconfiguration.go @@ -17,14 +17,13 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package rsm import ( "fmt" "regexp" "strconv" - "github.com/go-logr/logr" apps "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -36,6 +35,8 @@ import ( // MemberReconfigurationTransformer handles member reconfiguration type MemberReconfigurationTransformer struct{} +var _ graph.Transformer = &MemberReconfigurationTransformer{} + type actionInfo struct { shortActionName string ordinal int @@ -47,56 +48,56 @@ type conditionChecker = func() bool var actionNameRegex = regexp.MustCompile(`(.*)-([0-9]+)-([0-9]+)-([a-zA-Z\-]+)$`) func (t *MemberReconfigurationTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - transCtx, _ := ctx.(*CSSetTransformContext) - if model.IsObjectDeleting(transCtx.CSSet) { + transCtx, _ := ctx.(*rsmTransformContext) + if model.IsObjectDeleting(transCtx.rsm) { return nil } - csSet := transCtx.CSSet - - // get the underlying sts - stsVertex, err := getUnderlyingStsVertex(dag) - if err != nil { - return err - } + rsm := transCtx.rsm // handle cluster initialization // set initReplicas at creation - if csSet.Status.InitReplicas == 0 { - csSet.Status.InitReplicas = csSet.Spec.Replicas + if rsm.Status.InitReplicas == 0 { + rsm.Status.InitReplicas = rsm.Spec.Replicas return nil } // update readyInitReplicas - if csSet.Status.ReadyInitReplicas < csSet.Status.InitReplicas { - csSet.Status.ReadyInitReplicas = int32(len(csSet.Status.MembersStatus)) + if rsm.Status.ReadyInitReplicas < rsm.Status.InitReplicas { + rsm.Status.ReadyInitReplicas = int32(len(rsm.Status.MembersStatus)) } // return if cluster initialization not done - if csSet.Status.ReadyInitReplicas != csSet.Status.InitReplicas { + if rsm.Status.ReadyInitReplicas != rsm.Status.InitReplicas { return nil } // cluster initialization done, handle dynamic membership reconfiguration - // consensus cluster is ready - if isConsensusSetReady(csSet) { + 
// rsm is ready
+	if isRSMReady(rsm) {
 		return cleanAction(transCtx, dag)
 	}
 
-	if !shouldHaveActions(csSet) {
+	if !shouldHaveActions(rsm) {
 		return nil
 	}
 
+	// get the underlying sts
+	stsVertex, err := getUnderlyingStsVertex(dag)
+	if err != nil {
+		return err
+	}
+
 	// not enough replicas during scale-out; tell sts to create them.
 	sts, _ := stsVertex.OriObj.(*apps.StatefulSet)
-	memberReadyReplicas := int32(len(csSet.Status.MembersStatus))
-	if memberReadyReplicas < csSet.Spec.Replicas &&
-		sts.Status.ReadyReplicas < csSet.Spec.Replicas {
+	memberReadyReplicas := int32(len(rsm.Status.MembersStatus))
+	if memberReadyReplicas < rsm.Spec.Replicas &&
+		sts.Status.ReadyReplicas < rsm.Spec.Replicas {
 		return nil
 	}
 	stsVertex.Immutable = true
 
 	// barrier: the underlying sts is ready and has enough replicas
-	if sts.Status.ReadyReplicas < csSet.Spec.Replicas || !isStatefulSetReady(sts) {
+	if sts.Status.ReadyReplicas < rsm.Spec.Replicas || !isStatefulSetReady(sts) {
 		return nil
 	}
 
@@ -108,7 +109,7 @@ func (t *MemberReconfigurationTransformer) Transform(ctx graph.TransformContext,
 	// if no action, create the first one
 	if len(actionList) == 0 {
-		return createNextAction(transCtx, dag, csSet, nil)
+		return createNextAction(transCtx, dag, rsm, nil)
 	}
 
 	// got an action; there should be only one
@@ -117,35 +118,35 @@ func (t *MemberReconfigurationTransformer) Transform(ctx graph.TransformContext,
 	case action.Status.Succeeded > 0:
 		// wait for the action's result:
 		// e.g. action with ordinal 3 and type member-join, wait member 3 until it appears in status.membersStatus
-		if !isActionDone(csSet, action) {
+		if !isActionDone(rsm, action) {
 			return nil
 		}
 		// mark it as 'handled'
 		deleteAction(dag, action)
-		return createNextAction(transCtx, dag, csSet, action)
+		return createNextAction(transCtx, dag, rsm, action)
 	case action.Status.Failed > 0:
 		emitEvent(transCtx, action)
 		if !isSwitchoverAction(action) {
 			// need manual handling
 			return nil
 		}
-		return createNextAction(transCtx, dag, csSet, action)
+		return createNextAction(transCtx, dag, rsm, action)
 	default:
 		// action in progress
 		return nil
 	}
 }
 
-// consensus_set level 'ready' state:
+// rsm level 'ready' state:
 // 1. all replicas exist
 // 2.
all members have role set -func isConsensusSetReady(csSet *workloads.ConsensusSet) bool { - membersStatus := csSet.Status.MembersStatus - if len(membersStatus) != int(csSet.Spec.Replicas) { +func isRSMReady(rsm *workloads.ReplicatedStateMachine) bool { + membersStatus := rsm.Status.MembersStatus + if len(membersStatus) != int(rsm.Spec.Replicas) { return false } - for i := 0; i < int(csSet.Spec.Replicas); i++ { - podName := getPodName(csSet.Name, i) + for i := 0; i < int(rsm.Spec.Replicas); i++ { + podName := getPodName(rsm.Name, i) if !isMemberReady(podName, membersStatus) { return false } @@ -165,7 +166,7 @@ func isStatefulSetReady(sts *apps.StatefulSet) bool { return false } -func isMemberReady(podName string, membersStatus []workloads.ConsensusMemberStatus) bool { +func isMemberReady(podName string, membersStatus []workloads.MemberStatus) bool { for _, memberStatus := range membersStatus { if memberStatus.PodName == podName { return true @@ -174,7 +175,7 @@ func isMemberReady(podName string, membersStatus []workloads.ConsensusMemberStat return false } -func cleanAction(transCtx *CSSetTransformContext, dag *graph.DAG) error { +func cleanAction(transCtx *rsmTransformContext, dag *graph.DAG) error { actionList, err := getActionList(transCtx, jobScenarioMembership) if err != nil { return err @@ -192,13 +193,13 @@ func cleanAction(transCtx *CSSetTransformContext, dag *graph.DAG) error { return nil } -func isActionDone(csSet *workloads.ConsensusSet, action *batchv1.Job) bool { +func isActionDone(rsm *workloads.ReplicatedStateMachine, action *batchv1.Job) bool { ordinal, _ := getActionOrdinal(action.Name) - podName := getPodName(csSet.Name, ordinal) - membersStatus := csSet.Status.MembersStatus + podName := getPodName(rsm.Name, ordinal) + membersStatus := rsm.Status.MembersStatus switch action.Labels[jobTypeLabel] { case jobTypeSwitchover: - leader := getLeaderPodName(csSet.Status.MembersStatus) + leader := getLeaderPodName(rsm.Status.MembersStatus) return podName != leader case jobTypeMemberLeaveNotifying: return !isMemberReady(podName, membersStatus) @@ -218,72 +219,50 @@ func deleteAction(dag *graph.DAG, action *batchv1.Job) { doActionCleanup(dag, action) } -func createNextAction(transCtx *CSSetTransformContext, dag *graph.DAG, csSet *workloads.ConsensusSet, currentAction *batchv1.Job) error { - actionInfoList := generateActionInfoList(csSet) +func createNextAction(transCtx *rsmTransformContext, dag *graph.DAG, rsm *workloads.ReplicatedStateMachine, currentAction *batchv1.Job) error { + actionInfoList := generateActionInfoList(rsm) if len(actionInfoList) == 0 { return nil } - var nextActionInfo *actionInfo - switch { - case currentAction == nil, isSwitchoverAction(currentAction): - nextActionInfo = actionInfoList[0] - default: - nextActionInfo = nil - ordinal, _ := getActionOrdinal(currentAction.Name) - shortName := buildShortActionName(csSet.Name, ordinal, currentAction.Labels[jobTypeLabel]) - for i := 0; i < len(actionInfoList); i++ { - if actionInfoList[i].shortActionName != shortName { - continue - } - if i+1 < len(actionInfoList) { - nextActionInfo = actionInfoList[i+1] - break - } - } - } - - if nextActionInfo == nil { - return nil - } - - leader := getLeaderPodName(csSet.Status.MembersStatus) + nextActionInfo := actionInfoList[0] + leader := getLeaderPodName(rsm.Status.MembersStatus) ordinal := nextActionInfo.ordinal if nextActionInfo.actionType == jobTypeSwitchover { ordinal = 0 } - target := getPodName(csSet.Name, ordinal) - actionName := getActionName(csSet.Name, 
int(csSet.Generation), nextActionInfo.ordinal, nextActionInfo.actionType)
-	nextAction := buildAction(csSet, actionName, nextActionInfo.actionType, jobScenarioMembership, leader, target)
+	target := getPodName(rsm.Name, ordinal)
+	actionName := getActionName(rsm.Name, int(rsm.Generation), nextActionInfo.ordinal, nextActionInfo.actionType)
+	nextAction := buildAction(rsm, actionName, nextActionInfo.actionType, jobScenarioMembership, leader, target)
 
-	if err := abnormalAnalysis(csSet, nextAction); err != nil {
+	if err := abnormalAnalysis(rsm, nextAction); err != nil {
 		emitAbnormalEvent(transCtx, nextActionInfo.actionType, actionName, err)
 		return err
 	}
 
-	return createAction(dag, csSet, nextAction)
+	return createAction(dag, rsm, nextAction)
 }
 
-func generateActionInfoList(csSet *workloads.ConsensusSet) []*actionInfo {
+func generateActionInfoList(rsm *workloads.ReplicatedStateMachine) []*actionInfo {
 	var actionInfoList []*actionInfo
-	memberReadyReplicas := int32(len(csSet.Status.MembersStatus))
+	memberReadyReplicas := int32(len(rsm.Status.MembersStatus))
 
 	switch {
-	case memberReadyReplicas < csSet.Spec.Replicas:
+	case memberReadyReplicas < rsm.Spec.Replicas:
 		// member join
-		// members with ordinal less than 'spec.replicas' should in the consensus cluster
+		// members with ordinal less than 'spec.replicas' should be in the active cluster
 		actionTypeList := []string{jobTypeMemberJoinNotifying, jobTypeLogSync, jobTypePromote}
-		for i := memberReadyReplicas; i < csSet.Spec.Replicas; i++ {
-			actionInfos := generateActionInfos(csSet, int(i), actionTypeList)
+		for i := memberReadyReplicas; i < rsm.Spec.Replicas; i++ {
+			actionInfos := generateActionInfos(rsm, int(i), actionTypeList)
 			actionInfoList = append(actionInfoList, actionInfos...)
 		}
-	case memberReadyReplicas > csSet.Spec.Replicas:
+	case memberReadyReplicas > rsm.Spec.Replicas:
 		// member leave
-		// members with ordinal greater than 'spec.replicas - 1' should not in the consensus cluster
+		// members with ordinal greater than 'spec.replicas - 1' should not be in the active cluster
 		actionTypeList := []string{jobTypeSwitchover, jobTypeMemberLeaveNotifying}
-		for i := memberReadyReplicas - 1; i >= csSet.Spec.Replicas; i-- {
-			actionInfos := generateActionInfos(csSet, int(i), actionTypeList)
+		for i := memberReadyReplicas - 1; i >= rsm.Spec.Replicas; i-- {
+			actionInfos := generateActionInfos(rsm, int(i), actionTypeList)
+			actionInfoList = append(actionInfoList, actionInfos...)
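+			// To make the ordering above concrete (illustrative, not part of this
+			// change): scaling 3 -> 5 walks up from the lowest missing ordinal, so
+			// join/log-sync/promote actions are generated for members 3 then 4,
+			// while scaling 3 -> 1 walks down from the highest ordinal, generating
+			// switchover/leave actions for members 2 then 1; only one membership
+			// action is in flight at any time.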
} } @@ -291,22 +270,13 @@ func generateActionInfoList(csSet *workloads.ConsensusSet) []*actionInfo { return actionInfoList } -// TODO(free6om): remove all printActionList when all testes pass -func printActionList(logger logr.Logger, actionList []*batchv1.Job) { - var actionNameList []string - for _, action := range actionList { - actionNameList = append(actionNameList, fmt.Sprintf("%s-%v", action.Name, *action.Spec.Suspend)) - } - logger.Info(fmt.Sprintf("action list: %v\n", actionNameList)) -} - func isPreAction(actionType string) bool { return actionType == jobTypeSwitchover || actionType == jobTypeMemberLeaveNotifying } -func shouldHaveActions(csSet *workloads.ConsensusSet) bool { - currentReplicas := len(csSet.Status.MembersStatus) - expectedReplicas := int(csSet.Spec.Replicas) +func shouldHaveActions(rsm *workloads.ReplicatedStateMachine) bool { + currentReplicas := len(rsm.Status.MembersStatus) + expectedReplicas := int(rsm.Spec.Replicas) var actionTypeList []string switch { @@ -316,18 +286,18 @@ func shouldHaveActions(csSet *workloads.ConsensusSet) bool { actionTypeList = []string{jobTypeMemberJoinNotifying, jobTypeLogSync, jobTypePromote} } for _, actionType := range actionTypeList { - if shouldCreateAction(csSet, actionType, nil) { + if shouldCreateAction(rsm, actionType, nil) { return true } } return false } -func shouldCreateAction(csSet *workloads.ConsensusSet, actionType string, checker conditionChecker) bool { +func shouldCreateAction(rsm *workloads.ReplicatedStateMachine, actionType string, checker conditionChecker) bool { if checker != nil && !checker() { return false } - reconfiguration := csSet.Spec.MembershipReconfiguration + reconfiguration := rsm.Spec.MembershipReconfiguration if reconfiguration == nil { return false } @@ -367,12 +337,12 @@ func getUnderlyingStsVertex(dag *graph.DAG) (*model.ObjectVertex, error) { return stsVertex, nil } -// all members with ordinal less than action target pod should be in a good consensus state: +// all members with ordinal less than action target pod should be in a good replication state: // 1. they should be in membersStatus // 2. 
they should have a leader -func abnormalAnalysis(csSet *workloads.ConsensusSet, action *batchv1.Job) error { - membersStatus := csSet.Status.MembersStatus - statusMap := make(map[string]workloads.ConsensusMemberStatus, len(membersStatus)) +func abnormalAnalysis(rsm *workloads.ReplicatedStateMachine, action *batchv1.Job) error { + membersStatus := rsm.Status.MembersStatus + statusMap := make(map[string]workloads.MemberStatus, len(membersStatus)) for _, status := range membersStatus { statusMap[status.PodName] = status } @@ -383,7 +353,7 @@ func abnormalAnalysis(csSet *workloads.ConsensusSet, action *batchv1.Job) error } var abnormalPodList, leaderPodList []string for i := 0; i < currentMembers; i++ { - podName := getPodName(csSet.Name, i) + podName := getPodName(rsm.Name, i) status, ok := statusMap[podName] if !ok { abnormalPodList = append(abnormalPodList, podName) @@ -411,10 +381,10 @@ func abnormalAnalysis(csSet *workloads.ConsensusSet, action *batchv1.Job) error return nil } -func generateActionInfos(csSet *workloads.ConsensusSet, ordinal int, actionTypeList []string) []*actionInfo { +func generateActionInfos(rsm *workloads.ReplicatedStateMachine, ordinal int, actionTypeList []string) []*actionInfo { var actionInfos []*actionInfo - leaderPodName := getLeaderPodName(csSet.Status.MembersStatus) - podName := getPodName(csSet.Name, ordinal) + leaderPodName := getLeaderPodName(rsm.Status.MembersStatus) + podName := getPodName(rsm.Name, ordinal) for _, actionType := range actionTypeList { checker := func() bool { return podName == leaderPodName @@ -422,11 +392,11 @@ func generateActionInfos(csSet *workloads.ConsensusSet, ordinal int, actionTypeL if actionType != jobTypeSwitchover { checker = nil } - if !shouldCreateAction(csSet, actionType, checker) { + if !shouldCreateAction(rsm, actionType, checker) { continue } info := &actionInfo{ - shortActionName: buildShortActionName(csSet.Name, ordinal, actionType), + shortActionName: buildShortActionName(rsm.Name, ordinal, actionType), ordinal: ordinal, actionType: actionType, } @@ -434,5 +404,3 @@ func generateActionInfos(csSet *workloads.ConsensusSet, ordinal int, actionTypeL } return actionInfos } - -var _ graph.Transformer = &MemberReconfigurationTransformer{} diff --git a/internal/controller/rsm/transformer_member_reconfiguration_test.go b/internal/controller/rsm/transformer_member_reconfiguration_test.go new file mode 100644 index 00000000000..d03a6ab65aa --- /dev/null +++ b/internal/controller/rsm/transformer_member_reconfiguration_test.go @@ -0,0 +1,283 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/golang/mock/gomock" + apps "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/graph" + "github.com/apecloud/kubeblocks/internal/controller/model" +) + +var _ = Describe("member reconfiguration transformer test.", func() { + buildMembersStatus := func(replicas int) []workloads.MemberStatus { + var membersStatus []workloads.MemberStatus + for i := 0; i < replicas; i++ { + status := workloads.MemberStatus{ + PodName: getPodName(rsm.Name, i), + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + } + membersStatus = append(membersStatus, status) + } + if replicas > 1 { + membersStatus[1].ReplicaRole = workloads.ReplicaRole{Name: "leader", IsLeader: true} + } + return membersStatus + } + setRSMStatus := func(replicas int) { + membersStatus := buildMembersStatus(replicas) + rsm.Status.InitReplicas = 3 + rsm.Status.ReadyInitReplicas = rsm.Status.InitReplicas + rsm.Status.MembersStatus = membersStatus + rsm.Status.Replicas = rsm.Spec.Replicas + rsm.Status.ReadyReplicas = rsm.Status.Replicas + rsm.Status.AvailableReplicas = rsm.Status.Replicas + } + mockAction := func(ordinal int, actionType string, succeed bool) *batchv1.Job { + actionName := getActionName(rsm.Name, int(rsm.Generation), ordinal, actionType) + action := builder.NewJobBuilder(name, actionName). + AddLabelsInMap(map[string]string{ + constant.AppInstanceLabelKey: rsm.Name, + constant.KBManagedByKey: kindReplicatedStateMachine, + jobScenarioLabel: jobScenarioMembership, + jobTypeLabel: actionType, + jobHandledLabel: jobHandledFalse, + }). + SetSuspend(false). + GetObject() + if succeed { + action.Status.Succeeded = 1 + k8sMock.EXPECT(). + List(gomock.Any(), &batchv1.JobList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *batchv1.JobList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []batchv1.Job{*action} + return nil + }).Times(1) + } + return action + } + mockDAG := func(stsOld, stsNew *apps.StatefulSet) *graph.DAG { + d := graph.NewDAG() + model.PrepareStatus(d, transCtx.rsmOrig, transCtx.rsm) + model.PrepareUpdate(d, stsOld, stsNew) + return d + } + expectStsImmutable := func(d *graph.DAG, immutable bool) { + stsVertex, err := getUnderlyingStsVertex(d) + Expect(err).Should(BeNil()) + Expect(stsVertex.Immutable).Should(Equal(immutable)) + } + + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + SetUID(uid). + SetReplicas(3). + SetRoles(roles). + SetMembershipReconfiguration(reconfiguration). + SetService(service). 
+ GetObject() + + transCtx = &rsmTransformContext{ + Context: ctx, + Client: k8sMock, + EventRecorder: nil, + Logger: logger, + rsmOrig: rsm.DeepCopy(), + rsm: rsm, + } + + dag = graph.NewDAG() + model.PrepareStatus(dag, transCtx.rsmOrig, transCtx.rsm) + transformer = &MemberReconfigurationTransformer{} + }) + + Context("cluster initialization", func() { + It("should initialize well", func() { + By("initialReplicas=0") + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(rsm.Status.InitReplicas).Should(Equal(rsm.Spec.Replicas)) + + By("init one member") + membersStatus := buildMembersStatus(1) + rsm.Status.MembersStatus = membersStatus + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(rsm.Status.ReadyInitReplicas).Should(BeEquivalentTo(1)) + + By("all members initialized") + membersStatus = buildMembersStatus(int(rsm.Spec.Replicas)) + rsm.Status.MembersStatus = membersStatus + k8sMock.EXPECT(). + List(gomock.Any(), &batchv1.JobList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *batchv1.JobList, _ ...client.ListOption) error { + return nil + }).Times(1) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(rsm.Status.ReadyInitReplicas).Should(Equal(rsm.Status.InitReplicas)) + }) + }) + + Context("scale-out", func() { + It("should work well", func() { + By("make rsm ready for scale-out") + setRSMStatus(int(rsm.Spec.Replicas)) + rsm.Generation = 2 + rsm.Status.ObservedGeneration = 2 + stsOld := mockUnderlyingSts(*rsm, rsm.Generation) + // rsm spec updated + rsm.Generation = 3 + rsm.Spec.Replicas = 5 + sts := mockUnderlyingSts(*rsm, rsm.Generation) + model.PrepareUpdate(dag, stsOld, sts) + + By("update the underlying sts") + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, false) + + rsm.Status.ObservedGeneration = rsm.Generation + + By("prepare member 3 joining") + sts = mockUnderlyingSts(*rsm, rsm.Generation) + k8sMock.EXPECT(). + List(gomock.Any(), &batchv1.JobList{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, list *batchv1.JobList, _ ...client.ListOption) error { + return nil + }).Times(1) + dag = mockDAG(sts, sts) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, true) + dagExpected := mockDAG(sts, sts) + action := mockAction(3, jobTypeMemberJoinNotifying, false) + model.PrepareCreate(dagExpected, action) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("make member 3 joining successfully and prepare member 4 joining") + setRSMStatus(4) + action = mockAction(3, jobTypeMemberJoinNotifying, true) + dag = mockDAG(sts, sts) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, true) + dagExpected = mockDAG(sts, sts) + model.PrepareUpdate(dagExpected, action, action) + action = mockAction(4, jobTypeMemberJoinNotifying, false) + model.PrepareCreate(dagExpected, action) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("make member 4 joining successfully and cleanup") + setRSMStatus(int(rsm.Spec.Replicas)) + action = mockAction(4, jobTypeMemberJoinNotifying, true) + dag = mockDAG(sts, sts) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, false) + dagExpected = mockDAG(sts, sts) + model.PrepareUpdate(dagExpected, action, action) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) + + Context("scale-in", func() { + It("should work well", func() { + setRSMMembersStatus := func(replicas int) { + membersStatus := buildMembersStatus(replicas) + rsm.Status.InitReplicas = 3 + rsm.Status.ReadyInitReplicas = rsm.Status.InitReplicas + rsm.Status.MembersStatus = membersStatus + } + By("make rsm ready for scale-in") + setRSMStatus(int(rsm.Spec.Replicas)) + rsm.Generation = 2 + rsm.Status.ObservedGeneration = 2 + stsOld := mockUnderlyingSts(*rsm, rsm.Generation) + // rsm spec updated + rsm.Generation = 3 + rsm.Spec.Replicas = 1 + sts := mockUnderlyingSts(*rsm, rsm.Generation) + model.PrepareUpdate(dag, stsOld, sts) + + By("prepare member 2 leaving") + k8sMock.EXPECT(). + List(gomock.Any(), &batchv1.JobList{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, list *batchv1.JobList, _ ...client.ListOption) error { + return nil + }).Times(1) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, true) + dagExpected := mockDAG(stsOld, sts) + action := mockAction(2, jobTypeMemberLeaveNotifying, false) + model.PrepareCreate(dagExpected, action) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("make member 2 leaving successfully and prepare member 1 switchover") + setRSMMembersStatus(2) + action = mockAction(2, jobTypeMemberLeaveNotifying, true) + dag = mockDAG(stsOld, sts) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, true) + dagExpected = mockDAG(stsOld, sts) + model.PrepareUpdate(dagExpected, action, action) + action = mockAction(1, jobTypeSwitchover, false) + model.PrepareCreate(dagExpected, action) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("make member 1 switchover successfully and prepare member 1 leaving") + membersStatus := []workloads.MemberStatus{ + { + PodName: getPodName(rsm.Name, 0), + ReplicaRole: workloads.ReplicaRole{Name: "leader", IsLeader: true}, + }, + { + PodName: getPodName(rsm.Name, 1), + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + } + rsm.Status.MembersStatus = membersStatus + action = mockAction(1, jobTypeSwitchover, true) + dag = mockDAG(stsOld, sts) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, true) + dagExpected = mockDAG(stsOld, sts) + model.PrepareUpdate(dagExpected, action, action) + action = mockAction(1, jobTypeMemberLeaveNotifying, false) + model.PrepareCreate(dagExpected, action) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("make member 1 leaving successfully and cleanup") + setRSMMembersStatus(1) + action = mockAction(1, jobTypeMemberLeaveNotifying, true) + dag = mockDAG(stsOld, sts) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + expectStsImmutable(dag, false) + dagExpected = mockDAG(stsOld, sts) + model.PrepareUpdate(dagExpected, action, action) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controller/consensusset/transformer_object_generation.go b/internal/controller/rsm/transformer_object_generation.go similarity index 72% rename from internal/controller/consensusset/transformer_object_generation.go rename to internal/controller/rsm/transformer_object_generation.go index b3974375cd7..cf20764ec2b 100644 --- a/internal/controller/consensusset/transformer_object_generation.go +++ b/internal/controller/rsm/transformer_object_generation.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
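A note on the object-generation hunks that follow: the transformer rebuilds the rsm's secondary objects (service, headless service, env ConfigMap, StatefulSet), then reconciles them against a cache snapshot of what the rsm already owns. A minimal sketch of that set arithmetic, with assumed names (the patch's own closures are createNewObjects/updateObjects/deleteOrphanObjects):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// snapshotDiff sketches the reconciliation performed below; the names here
// are illustrative assumptions, not the patch's identifiers.
func snapshotDiff(cache, generated map[string]struct{}) (create, update, del sets.Set[string]) {
	oldNames := sets.KeySet(cache)
	newNames := sets.KeySet(generated)
	create = newNames.Difference(oldNames)   // generated this round, absent from cache
	update = newNames.Intersection(oldNames) // present on both sides
	del = oldNames.Difference(newNames)      // owned orphans to delete
	return create, update, del
}

func main() {
	cache := map[string]struct{}{"foo": {}, "foo-headless": {}, "stale-cm": {}}
	generated := map[string]struct{}{"foo": {}, "foo-headless": {}, "foo-env": {}}
	create, update, del := snapshotDiff(cache, generated)
	fmt.Println(create.UnsortedList(), update.UnsortedList(), del.UnsortedList())
	// [foo-env] [foo foo-headless] [stale-cm] (order within each set is unspecified)
}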
*/ -package consensusset +package rsm import ( "encoding/json" @@ -42,31 +42,33 @@ import ( type ObjectGenerationTransformer struct{} +var _ graph.Transformer = &ObjectGenerationTransformer{} + func (t *ObjectGenerationTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - transCtx, _ := ctx.(*CSSetTransformContext) - csSet := transCtx.CSSet - oriSet := transCtx.OrigCSSet + transCtx, _ := ctx.(*rsmTransformContext) + rsm := transCtx.rsm + rsmOrig := transCtx.rsmOrig - if model.IsObjectDeleting(oriSet) { + if model.IsObjectDeleting(rsmOrig) { return nil } // generate objects by current spec - svc := buildSvc(*csSet) - headLessSvc := buildHeadlessSvc(*csSet) - envConfig := buildEnvConfigMap(*csSet) - sts := buildSts(*csSet, headLessSvc.Name, *envConfig) + svc := buildSvc(*rsm) + headLessSvc := buildHeadlessSvc(*rsm) + envConfig := buildEnvConfigMap(*rsm) + sts := buildSts(*rsm, headLessSvc.Name, *envConfig) objects := []client.Object{svc, headLessSvc, envConfig, sts} for _, object := range objects { - if err := controllerutil.SetOwnership(csSet, object, model.GetScheme(), csSetFinalizerName); err != nil { + if err := controllerutil.SetOwnership(rsm, object, model.GetScheme(), rsmFinalizerName); err != nil { return err } } // read cache snapshot - ml := client.MatchingLabels{model.AppInstanceLabelKey: csSet.Name, model.KBManagedByKey: kindConsensusSet} - oldSnapshot, err := model.ReadCacheSnapshot(ctx, csSet, ml, ownedKinds()...) + ml := client.MatchingLabels{constant.AppInstanceLabelKey: rsm.Name, constant.KBManagedByKey: kindReplicatedStateMachine} + oldSnapshot, err := model.ReadCacheSnapshot(ctx, rsm, ml, ownedKinds()...) if err != nil { return err } @@ -104,7 +106,6 @@ func (t *ObjectGenerationTransformer) Transform(ctx graph.TransformContext, dag model.PrepareDelete(dag, oldSnapshot[name]) } } - handleDependencies := func() { model.DependOn(dag, sts, svc, headLessSvc, envConfig) } @@ -121,36 +122,36 @@ func (t *ObjectGenerationTransformer) Transform(ctx graph.TransformContext, dag return nil } -func buildSvc(csSet workloads.ConsensusSet) *corev1.Service { - svcBuilder := builder.NewServiceBuilder(csSet.Namespace, csSet.Name). - AddLabels(model.AppInstanceLabelKey, csSet.Name). - AddLabels(model.KBManagedByKey, kindConsensusSet). - // AddAnnotationsInMap(csSet.Annotations). - AddSelectors(model.AppInstanceLabelKey, csSet.Name). - AddSelectors(model.KBManagedByKey, kindConsensusSet). - AddPorts(csSet.Spec.Service.Ports...). - SetType(csSet.Spec.Service.Type) - for _, role := range csSet.Spec.Roles { +func buildSvc(rsm workloads.ReplicatedStateMachine) *corev1.Service { + svcBuilder := builder.NewServiceBuilder(rsm.Namespace, rsm.Name). + AddLabels(constant.AppInstanceLabelKey, rsm.Name). + AddLabels(constant.KBManagedByKey, kindReplicatedStateMachine). + // AddAnnotationsInMap(rsm.Annotations). + AddSelectors(constant.AppInstanceLabelKey, rsm.Name). + AddSelectors(constant.KBManagedByKey, kindReplicatedStateMachine). + AddPorts(rsm.Spec.Service.Ports...). + SetType(rsm.Spec.Service.Type) + for _, role := range rsm.Spec.Roles { if role.IsLeader && len(role.Name) > 0 { - svcBuilder.AddSelectors(model.ConsensusSetAccessModeLabelKey, string(role.AccessMode)) + svcBuilder.AddSelectors(rsmAccessModeLabelKey, string(role.AccessMode)) } } return svcBuilder.GetObject() } -func buildHeadlessSvc(csSet workloads.ConsensusSet) *corev1.Service { - hdlBuilder := builder.NewHeadlessServiceBuilder(csSet.Namespace, getHeadlessSvcName(csSet)). 
- AddLabels(model.AppInstanceLabelKey, csSet.Name). - AddLabels(model.KBManagedByKey, kindConsensusSet). - AddSelectors(model.AppInstanceLabelKey, csSet.Name). - AddSelectors(model.KBManagedByKey, kindConsensusSet) +func buildHeadlessSvc(rsm workloads.ReplicatedStateMachine) *corev1.Service { + hdlBuilder := builder.NewHeadlessServiceBuilder(rsm.Namespace, getHeadlessSvcName(rsm)). + AddLabels(constant.AppInstanceLabelKey, rsm.Name). + AddLabels(constant.KBManagedByKey, kindReplicatedStateMachine). + AddSelectors(constant.AppInstanceLabelKey, rsm.Name). + AddSelectors(constant.KBManagedByKey, kindReplicatedStateMachine) // .AddAnnotations("prometheus.io/scrape", strconv.FormatBool(component.Monitor.Enable)) // if component.Monitor.Enable { // hdBuilder.AddAnnotations("prometheus.io/path", component.Monitor.ScrapePath). // AddAnnotations("prometheus.io/port", strconv.Itoa(int(component.Monitor.ScrapePort))). // AddAnnotations("prometheus.io/scheme", "http") // } - for _, container := range csSet.Spec.Template.Spec.Containers { + for _, container := range rsm.Spec.Template.Spec.Containers { for _, port := range container.Ports { servicePort := corev1.ServicePort{ Protocol: port.Protocol, @@ -170,38 +171,38 @@ func buildHeadlessSvc(csSet workloads.ConsensusSet) *corev1.Service { return hdlBuilder.GetObject() } -func buildSts(csSet workloads.ConsensusSet, headlessSvcName string, envConfig corev1.ConfigMap) *apps.StatefulSet { - stsBuilder := builder.NewStatefulSetBuilder(csSet.Namespace, csSet.Name) - template := buildStsPodTemplate(csSet, envConfig) - stsBuilder.AddLabels(model.AppInstanceLabelKey, csSet.Name). - AddLabels(model.KBManagedByKey, kindConsensusSet). - AddMatchLabel(model.AppInstanceLabelKey, csSet.Name). - AddMatchLabel(model.KBManagedByKey, kindConsensusSet). +func buildSts(rsm workloads.ReplicatedStateMachine, headlessSvcName string, envConfig corev1.ConfigMap) *apps.StatefulSet { + stsBuilder := builder.NewStatefulSetBuilder(rsm.Namespace, rsm.Name) + template := buildStsPodTemplate(rsm, envConfig) + stsBuilder.AddLabels(constant.AppInstanceLabelKey, rsm.Name). + AddLabels(constant.KBManagedByKey, kindReplicatedStateMachine). + AddMatchLabel(constant.AppInstanceLabelKey, rsm.Name). + AddMatchLabel(constant.KBManagedByKey, kindReplicatedStateMachine). SetServiceName(headlessSvcName). - SetReplicas(csSet.Spec.Replicas). + SetReplicas(rsm.Spec.Replicas). SetPodManagementPolicy(apps.OrderedReadyPodManagement). - SetVolumeClaimTemplates(csSet.Spec.VolumeClaimTemplates...). + SetVolumeClaimTemplates(rsm.Spec.VolumeClaimTemplates...). SetTemplate(*template). SetUpdateStrategyType(apps.OnDeleteStatefulSetStrategyType) return stsBuilder.GetObject() } -func buildEnvConfigMap(csSet workloads.ConsensusSet) *corev1.ConfigMap { - envData := buildEnvConfigData(csSet) - return builder.NewConfigMapBuilder(csSet.Namespace, csSet.Name+"-env"). - AddLabels(model.AppInstanceLabelKey, csSet.Name). - AddLabels(model.KBManagedByKey, kindConsensusSet). +func buildEnvConfigMap(rsm workloads.ReplicatedStateMachine) *corev1.ConfigMap { + envData := buildEnvConfigData(rsm) + return builder.NewConfigMapBuilder(rsm.Namespace, rsm.Name+"-env"). + AddLabels(constant.AppInstanceLabelKey, rsm.Name). + AddLabels(constant.KBManagedByKey, kindReplicatedStateMachine). 
SetData(envData).GetObject()
 }
 
-func buildStsPodTemplate(csSet workloads.ConsensusSet, envConfig corev1.ConfigMap) *corev1.PodTemplateSpec {
-	template := csSet.Spec.Template
+func buildStsPodTemplate(rsm workloads.ReplicatedStateMachine, envConfig corev1.ConfigMap) *corev1.PodTemplateSpec {
+	template := rsm.Spec.Template
 	labels := template.Labels
 	if labels == nil {
 		labels = make(map[string]string, 2)
 	}
-	labels[model.AppInstanceLabelKey] = csSet.Name
-	labels[model.KBManagedByKey] = kindConsensusSet
+	labels[constant.AppInstanceLabelKey] = rsm.Name
+	labels[constant.KBManagedByKey] = kindReplicatedStateMachine
 	template.Labels = labels
 
 	// inject env ConfigMap into workload pods only
@@ -216,14 +217,14 @@ func buildStsPodTemplate(csSet workloads.ConsensusSet, envConfig corev1.ConfigMa
 		}})
 	}
 
-	injectRoleObservationContainer(csSet, &template)
+	injectRoleObservationContainer(rsm, &template)
 
 	return &template
 }
 
-func injectRoleObservationContainer(csSet workloads.ConsensusSet, template *corev1.PodTemplateSpec) {
-	roleObservation := csSet.Spec.RoleObservation
-	credential := csSet.Spec.Credential
+func injectRoleObservationContainer(rsm workloads.ReplicatedStateMachine, template *corev1.PodTemplateSpec) {
+	roleObservation := rsm.Spec.RoleObservation
+	credential := rsm.Spec.Credential
 	credentialEnv := make([]corev1.EnvVar, 0)
 	if credential != nil {
 		credentialEnv = append(credentialEnv,
@@ -245,9 +246,9 @@ func injectRoleObservationContainer(csSet workloads.ConsensusSet, template *core
 		svcPort = findNextAvailablePort(svcPort, allUsedPorts)
 		actionSvcPorts = append(actionSvcPorts, svcPort)
 	}
-	injectObservationActionContainer(csSet, template, actionSvcPorts, credentialEnv)
+	injectObservationActionContainer(rsm, template, actionSvcPorts, credentialEnv)
 	actionSvcList, _ := json.Marshal(actionSvcPorts)
-	injectRoleObserveContainer(csSet, template, string(actionSvcList), credentialEnv)
+	injectRoleObserveContainer(rsm, template, string(actionSvcList), credentialEnv)
 }
 
 func findNextAvailablePort(base int32, allUsedPorts []int32) int32 {
@@ -277,10 +278,10 @@ func findAllUsedPorts(template *corev1.PodTemplateSpec) []int32 {
 	return allUsedPorts
 }
 
-func injectRoleObserveContainer(csSet workloads.ConsensusSet, template *corev1.PodTemplateSpec, actionSvcList string, credentialEnv []corev1.EnvVar) {
+func injectRoleObserveContainer(rsm workloads.ReplicatedStateMachine, template *corev1.PodTemplateSpec, actionSvcList string, credentialEnv []corev1.EnvVar) {
 	// compute parameters for role observation container
-	roleObservation := csSet.Spec.RoleObservation
-	credential := csSet.Spec.Credential
+	roleObservation := rsm.Spec.RoleObservation
+	credential := rsm.Spec.Credential
 	image := viper.GetString("ROLE_OBSERVATION_IMAGE")
 	if len(image) == 0 {
 		image = defaultRoleObservationImage
@@ -311,7 +312,7 @@ func injectRoleObserveContainer(csSet workloads.ConsensusSet, template *corev1.P
 		})
 	}
 	// find the service port of the db engine
-	servicePort := findSvcPort(csSet)
+	servicePort := findSvcPort(rsm)
 	if servicePort > 0 {
 		env = append(env,
 			corev1.EnvVar{
@@ -330,10 +331,9 @@ func injectRoleObserveContainer(csSet workloads.ConsensusSet, template *corev1.P
 		Name:            roleObservationName,
 		Image:           image,
 		ImagePullPolicy: "IfNotPresent",
-		Command: []string{"role-agent",
+		Command: []string{
+			"role-agent",
 			"--port", strconv.Itoa(observationDaemonPort),
-			"--protocol", "http",
-			"--log-level", "info",
 		},
 		Ports: []corev1.ContainerPort{{
 			ContainerPort: int32(observationDaemonPort),
@@ -344,10 +344,7 @@ func
injectRoleObserveContainer(csSet workloads.ConsensusSet, template *corev1.P ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ Command: []string{ - "curl", "-X", "POST", - "--max-time", "1", - "--fail-with-body", "--silent", - "-H", "Content-ComponentDefRef: application/json", + "/bin/grpc_health_probe", roleObserveURI, }, }, @@ -365,7 +362,7 @@ func injectRoleObserveContainer(csSet workloads.ConsensusSet, template *corev1.P template.Spec.Containers = append(template.Spec.Containers, container) } -func injectObservationActionContainer(csSet workloads.ConsensusSet, template *corev1.PodTemplateSpec, actionSvcPorts []int32, credentialEnv []corev1.EnvVar) { +func injectObservationActionContainer(rsm workloads.ReplicatedStateMachine, template *corev1.PodTemplateSpec, actionSvcPorts []int32, credentialEnv []corev1.EnvVar) { // inject shared volume agentVolume := corev1.Volume{ Name: roleAgentVolumeName, @@ -395,7 +392,7 @@ func injectObservationActionContainer(csSet workloads.ConsensusSet, template *co template.Spec.InitContainers = append(template.Spec.InitContainers, initContainer) // inject action containers based on utility images - for i, action := range csSet.Spec.RoleObservation.ObservationActions { + for i, action := range rsm.Spec.RoleObservation.ObservationActions { image := action.Image if len(image) == 0 { image = defaultActionImage @@ -420,11 +417,10 @@ func injectObservationActionContainer(csSet workloads.ConsensusSet, template *co } } -func buildEnvConfigData(set workloads.ConsensusSet) map[string]string { +func buildEnvConfigData(set workloads.ReplicatedStateMachine) map[string]string { envData := map[string]string{} - prefix := constant.KBPrefix + "_" + strings.ToUpper(set.Name) + "_" - prefix = strings.ReplaceAll(prefix, "-", "_") + prefix := constant.KBPrefix + "_RSM_" svcName := getHeadlessSvcName(set) envData[prefix+"N"] = strconv.Itoa(int(set.Spec.Replicas)) for i := 0; i < int(set.Spec.Replicas); i++ { @@ -433,7 +429,7 @@ func buildEnvConfigData(set workloads.ConsensusSet) map[string]string { envData[hostNameTplKey] = fmt.Sprintf("%s.%s", hostNameTplValue, svcName) } - // build consensus env from set.Status.MembersStatus + // build member related envs from set.Status.MembersStatus followers := "" for _, memberStatus := range set.Status.MembersStatus { if memberStatus.PodName == "" || memberStatus.PodName == defaultPodName { @@ -456,9 +452,7 @@ func buildEnvConfigData(set workloads.ConsensusSet) map[string]string { // set owner uid to let pod know if the owner is recreated uid := string(set.UID) envData[prefix+"OWNER_UID"] = uid - envData[constant.KBPrefix+"_CONSENSUS_SET_OWNER_UID_SUFFIX8"] = uid[len(uid)-4:] + envData[prefix+"OWNER_UID_SUFFIX8"] = uid[len(uid)-4:] return envData } - -var _ graph.Transformer = &ObjectGenerationTransformer{} diff --git a/internal/controller/rsm/transformer_objection_generation_test.go b/internal/controller/rsm/transformer_objection_generation_test.go new file mode 100644 index 00000000000..82dd8b3b245 --- /dev/null +++ b/internal/controller/rsm/transformer_objection_generation_test.go @@ -0,0 +1,110 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
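The buildEnvConfigData hunks above flatten the env prefix from the per-object KB_<SET_NAME>_ form to a fixed KB_RSM_. A worked example of the resulting ConfigMap data, assuming a hypothetical rsm "foo" with replicas=3 and the leader on foo-1; note that despite the SUFFIX8 name, the hunk keeps uid[len(uid)-4:], i.e. four characters:

package main

import "fmt"

func main() {
	// Hypothetical buildEnvConfigData result. Keys marked (inferred) are
	// assumptions about code outside the hunks; the others follow directly.
	env := map[string]string{
		"KB_RSM_N":                 "3",
		"KB_RSM_0_HOSTNAME":        "foo-0.foo-headless", // (inferred key shape)
		"KB_RSM_1_HOSTNAME":        "foo-1.foo-headless", // (inferred key shape)
		"KB_RSM_2_HOSTNAME":        "foo-2.foo-headless", // (inferred key shape)
		"KB_RSM_LEADER":            "foo-1",              // (inferred)
		"KB_RSM_FOLLOWERS":         "foo-0,foo-2",        // (inferred)
		"KB_RSM_OWNER_UID":         "…e7a1",              // full owner UID in practice
		"KB_RSM_OWNER_UID_SUFFIX8": "e7a1",               // last 4 chars, per the hunk
	}
	fmt.Println(len(env), "entries injected via the -env ConfigMap")
}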
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/golang/mock/gomock" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/graph" + "github.com/apecloud/kubeblocks/internal/controller/model" +) + +var _ = Describe("object generation transformer test.", func() { + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + SetUID(uid). + SetRoles(roles). + SetService(service). + SetCredential(credential). + SetTemplate(template). + SetObservationActions(observeActions). + GetObject() + + transCtx = &rsmTransformContext{ + Context: ctx, + Client: k8sMock, + EventRecorder: nil, + Logger: logger, + rsmOrig: rsm.DeepCopy(), + rsm: rsm, + } + }) + + Context("Transform function", func() { + It("should work well", func() { + sts := builder.NewStatefulSetBuilder(namespace, name).GetObject() + headlessSvc := builder.NewHeadlessServiceBuilder(name, getHeadlessSvcName(*rsm)).GetObject() + svc := builder.NewServiceBuilder(name, name).GetObject() + env := builder.NewConfigMapBuilder(name, name+"-env").GetObject() + k8sMock.EXPECT(). + List(gomock.Any(), &apps.StatefulSetList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *apps.StatefulSetList, _ ...client.ListOption) error { + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.ServiceList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.ServiceList, _ ...client.ListOption) error { + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.SecretList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.SecretList, _ ...client.ListOption) error { + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.ConfigMapList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.ConfigMapList, _ ...client.ListOption) error { + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &policyv1.PodDisruptionBudgetList{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, list *policyv1.PodDisruptionBudgetList, _ ...client.ListOption) error { + return nil + }).Times(1) + + dagExpected := graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + model.PrepareCreate(dagExpected, sts) + model.PrepareCreate(dagExpected, headlessSvc) + model.PrepareCreate(dagExpected, svc) + model.PrepareCreate(dagExpected, env) + model.DependOn(dagExpected, sts, headlessSvc, svc, env) + + // do Transform + dag := graph.NewDAG() + model.PrepareStatus(dag, transCtx.rsmOrig, transCtx.rsm) + transformer := ObjectGenerationTransformer{} + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + + // compare DAGs + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controller/consensusset/transformer_status.go b/internal/controller/rsm/transformer_status.go similarity index 54% rename from internal/controller/consensusset/transformer_status.go rename to internal/controller/rsm/transformer_status.go index f8b5a165334..3ac38ecbf0e 100644 --- a/internal/controller/consensusset/transformer_status.go +++ b/internal/controller/rsm/transformer_status.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package rsm import ( apps "k8s.io/api/apps/v1" @@ -27,46 +27,48 @@ import ( "github.com/apecloud/kubeblocks/internal/controller/model" ) -// CSSetStatusTransformer computes the current status: -// 1. read the underlying sts's status and copy them to consensus set's status -// 2. read pod role label and update consensus set's status role fields -type CSSetStatusTransformer struct{} +// ObjectStatusTransformer computes the current status: +// 1. read the underlying sts's status and copy them to the primary object's status +// 2. 
read pod role label and update the primary object's status role fields
+type ObjectStatusTransformer struct{}
 
-func (t *CSSetStatusTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error {
-	transCtx, _ := ctx.(*CSSetTransformContext)
-	csSet := transCtx.CSSet
-	origCSSet := transCtx.OrigCSSet
+var _ graph.Transformer = &ObjectStatusTransformer{}
+
+func (t *ObjectStatusTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error {
+	transCtx, _ := ctx.(*rsmTransformContext)
+	rsm := transCtx.rsm
+	rsmOrig := transCtx.rsmOrig
 	// fast return
-	if model.IsObjectDeleting(origCSSet) {
+	if model.IsObjectDeleting(rsmOrig) {
 		return nil
 	}
 
 	switch {
-	case model.IsObjectUpdating(origCSSet):
-		// use consensus set's generation instead of sts's
-		csSet.Status.ObservedGeneration = csSet.Generation
+	case model.IsObjectUpdating(rsmOrig):
+		// use the rsm's generation instead of the sts's
+		rsm.Status.ObservedGeneration = rsm.Generation
 		// hack for sts initialization error: is invalid: status.replicas: Required value
-		if csSet.Status.Replicas == 0 {
-			csSet.Status.Replicas = csSet.Spec.Replicas
+		if rsm.Status.Replicas == 0 {
+			rsm.Status.Replicas = rsm.Spec.Replicas
 		}
-	case model.IsObjectStatusUpdating(origCSSet):
+	case model.IsObjectStatusUpdating(rsmOrig):
 		// read the underlying sts
 		sts := &apps.StatefulSet{}
-		if err := transCtx.Client.Get(transCtx.Context, client.ObjectKeyFromObject(csSet), sts); err != nil {
+		if err := transCtx.Client.Get(transCtx.Context, client.ObjectKeyFromObject(rsm), sts); err != nil {
 			return err
 		}
-		// keep csSet's ObservedGeneration to avoid override by sts's ObservedGeneration
-		generation := csSet.Status.ObservedGeneration
-		csSet.Status.StatefulSetStatus = sts.Status
-		csSet.Status.ObservedGeneration = generation
-		// read all pods belong to the sts, hence belong to our consensus set
+		// keep the rsm's ObservedGeneration so it isn't overridden by the sts's ObservedGeneration
+		generation := rsm.Status.ObservedGeneration
+		rsm.Status.StatefulSetStatus = sts.Status
+		rsm.Status.ObservedGeneration = generation
+		// read all pods belonging to the sts, hence belonging to the rsm
 		pods, err := getPodsOfStatefulSet(transCtx.Context, transCtx.Client, sts)
 		if err != nil {
 			return err
 		}
 		// update role fields
-		setMembersStatus(csSet, pods)
+		setMembersStatus(rsm, pods)
 	}
 
 	if err := model.PrepareRootStatus(dag); err != nil {
@@ -75,5 +77,3 @@ func (t *CSSetStatusTransformer) Transform(ctx graph.TransformContext, dag *grap
 
 	return nil
 }
-
-var _ graph.Transformer = &CSSetStatusTransformer{}
diff --git a/internal/controller/rsm/transformer_status_test.go b/internal/controller/rsm/transformer_status_test.go
new file mode 100644
index 00000000000..dab98ec03d8
--- /dev/null
+++ b/internal/controller/rsm/transformer_status_test.go
@@ -0,0 +1,151 @@
+/*
+Copyright (C) 2022-2023 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
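ObjectStatusTransformer above branches on model.IsObjectUpdating versus model.IsObjectStatusUpdating, neither of which is defined in this patch. A plausible reading, assuming both compare metadata.generation with status.observedGeneration (the real helpers in internal/controller/model may differ):

package modelsketch

// Assumed semantics: a spec change bumps metadata.generation, and the
// controller copies it to status.observedGeneration once the change is handled.
func isUpdating(generation, observedGeneration int64) bool {
	return generation != observedGeneration // spec changed, not yet reconciled
}

func isStatusUpdating(generation, observedGeneration int64) bool {
	return generation == observedGeneration // spec settled; only status drifts
}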
+*/ + +package rsm + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/model" + "github.com/golang/mock/gomock" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("object status transformer test.", func() { + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + SetUID(uid). + SetReplicas(3). + SetRoles(roles). + SetMembershipReconfiguration(reconfiguration). + SetService(service). + GetObject() + + transCtx = &rsmTransformContext{ + Context: ctx, + Client: k8sMock, + EventRecorder: nil, + Logger: logger, + rsmOrig: rsm.DeepCopy(), + rsm: rsm, + } + + dag = mockDAG() + transformer = &ObjectStatusTransformer{} + }) + + Context("rsm deletion", func() { + It("should return directly", func() { + ts := metav1.NewTime(time.Now()) + transCtx.rsmOrig.DeletionTimestamp = &ts + transCtx.rsm.DeletionTimestamp = &ts + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + dagExpected := mockDAG() + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) + + Context("rsm update", func() { + It("should work well", func() { + generation := int64(2) + rsm.Generation = generation + rsm.Status.ObservedGeneration = generation - 1 + transCtx.rsmOrig = rsm.DeepCopy() + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + dagExpected := mockDAG() + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + root, err := model.FindRootVertex(dag) + Expect(err).Should(BeNil()) + Expect(root.Action).ShouldNot(BeNil()) + Expect(*root.Action).Should(Equal(model.STATUS)) + rsmNew, ok := root.Obj.(*workloads.ReplicatedStateMachine) + Expect(ok).Should(BeTrue()) + Expect(rsmNew.Generation).Should(Equal(generation)) + Expect(rsmNew.Status.ObservedGeneration).Should(Equal(generation)) + Expect(rsmNew.Status.Replicas).Should(Equal(rsmNew.Spec.Replicas)) + }) + }) + + Context("rsm status update", func() { + It("should work well", func() { + generation := int64(2) + rsm.Generation = generation + rsm.Status.ObservedGeneration = generation + transCtx.rsmOrig = rsm.DeepCopy() + sts := mockUnderlyingSts(*rsm, 1) + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &apps.StatefulSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *apps.StatefulSet, _ ...client.GetOption) error { + Expect(obj).ShouldNot(BeNil()) + *obj = *sts + return nil + }).Times(1) + pod0 := builder.NewPodBuilder(namespace, getPodName(rsm.Name, 0)). + AddLabels(roleLabelKey, "follower"). + GetObject() + pod1 := builder.NewPodBuilder(namespace, getPodName(name, 1)). + AddLabels(roleLabelKey, "leader"). + GetObject() + pod2 := builder.NewPodBuilder(namespace, getPodName(name, 2)). + AddLabels(roleLabelKey, "follower"). + GetObject() + makePodUpdateReady("new-revision", pod0, pod1, pod2) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.PodList{}, gomock.Any()). 
+				DoAndReturn(func(_ context.Context, list *corev1.PodList, _ ...client.ListOption) error {
+					Expect(list).ShouldNot(BeNil())
+					list.Items = []corev1.Pod{*pod0, *pod1, *pod2}
+					return nil
+				}).Times(1)
+			Expect(transformer.Transform(transCtx, dag)).Should(Succeed())
+			dagExpected := mockDAG()
+			Expect(dag.Equals(dagExpected, less)).Should(BeTrue())
+			root, err := model.FindRootVertex(dag)
+			Expect(err).Should(BeNil())
+			Expect(root.Action).ShouldNot(BeNil())
+			Expect(*root.Action).Should(Equal(model.STATUS))
+			rsmNew, ok := root.Obj.(*workloads.ReplicatedStateMachine)
+			Expect(ok).Should(BeTrue())
+			Expect(rsmNew.Status.ObservedGeneration).Should(Equal(generation))
+			// the only difference between rsm.Status.StatefulSetStatus and sts.Status is ObservedGeneration;
+			// align it here to keep the comparison simple
+			rsmNew.Status.ObservedGeneration = sts.Status.ObservedGeneration
+			Expect(rsmNew.Status.StatefulSetStatus).Should(Equal(sts.Status))
+			pods := []*corev1.Pod{pod0, pod1, pod2}
+			for _, pod := range pods {
+				matched := false
+				for _, status := range rsmNew.Status.MembersStatus {
+					if status.PodName == pod.Name && status.ReplicaRole.Name == pod.Labels[roleLabelKey] {
+						matched = true
+					}
+				}
+				Expect(matched).Should(BeTrue())
+			}
+		})
+	})
+})
diff --git a/internal/controller/consensusset/transformer_update_strategy.go b/internal/controller/rsm/transformer_update_strategy.go
similarity index 72%
rename from internal/controller/consensusset/transformer_update_strategy.go
rename to internal/controller/rsm/transformer_update_strategy.go
index 7253845d052..f3e6af13d0a 100644
--- a/internal/controller/consensusset/transformer_update_strategy.go
+++ b/internal/controller/rsm/transformer_update_strategy.go
@@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License
 along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
-package consensusset
+package rsm
 
 import (
 	apps "k8s.io/api/apps/v1"
@@ -32,20 +32,22 @@ import (
 
 type UpdateStrategyTransformer struct{}
 
+var _ graph.Transformer = &UpdateStrategyTransformer{}
+
 func (t *UpdateStrategyTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error {
-	transCtx, _ := ctx.(*CSSetTransformContext)
-	csSet := transCtx.CSSet
-	origCSSet := transCtx.OrigCSSet
-	if !model.IsObjectStatusUpdating(origCSSet) {
+	transCtx, _ := ctx.(*rsmTransformContext)
+	rsm := transCtx.rsm
+	rsmOrig := transCtx.rsmOrig
+	if !model.IsObjectStatusUpdating(rsmOrig) {
 		return nil
 	}
 
 	// read the underlying sts
 	stsObj := &apps.StatefulSet{}
-	if err := transCtx.Client.Get(transCtx.Context, client.ObjectKeyFromObject(csSet), stsObj); err != nil {
+	if err := transCtx.Client.Get(transCtx.Context, client.ObjectKeyFromObject(rsm), stsObj); err != nil {
 		return err
 	}
-	// read all pods belong to the sts, hence belong to our consensus set
+	// read all pods belonging to the sts, hence belonging to the rsm
 	pods, err := getPodsOfStatefulSet(transCtx.Context, transCtx.Client, stsObj)
 	if err != nil {
 		return err
@@ -65,14 +67,14 @@ func (t *UpdateStrategyTransformer) Transform(ctx graph.TransformContext, dag *g
 		return nil
 	}
 
-	// we don't check whether pod role label present: prefer stateful set's Update done than role probing ready
-	// TODO(free6om): maybe should wait consensus ready for high availability:
+	// we don't check whether the pod role label is present: prefer the sts's update completing over waiting for role probing to be ready
+	// TODO(free6om): maybe we should wait for the rsm to be ready, for high availability:
	// 1. after some pods updated
	// 2. before switchover
	// 3. after switchover done
 
 	// generate the pods Deletion plan
-	plan := newUpdatePlan(*csSet, pods)
+	plan := newUpdatePlan(*rsm, pods)
 	podsToBeUpdated, err := plan.execute()
 	if err != nil {
 		return err
@@ -94,13 +96,13 @@ func (t *UpdateStrategyTransformer) Transform(ctx graph.TransformContext, dag *g
 }
 
 // return true means an action was created or is in progress; wait for it to reach a terminal state
-func doSwitchoverIfNeeded(transCtx *CSSetTransformContext, dag *graph.DAG, pods []corev1.Pod, podsToBeUpdated []*corev1.Pod) (bool, error) {
+func doSwitchoverIfNeeded(transCtx *rsmTransformContext, dag *graph.DAG, pods []corev1.Pod, podsToBeUpdated []*corev1.Pod) (bool, error) {
 	if len(podsToBeUpdated) == 0 {
 		return false, nil
 	}
 
-	csSet := transCtx.CSSet
-	if !shouldSwitchover(csSet, podsToBeUpdated) {
+	rsm := transCtx.rsm
+	if !shouldSwitchover(rsm, podsToBeUpdated) {
 		return false, nil
 	}
 
@@ -109,16 +111,16 @@ func doSwitchoverIfNeeded(transCtx *CSSetTransformContext, dag *graph.DAG, pods
 		return true, err
 	}
 	if len(actionList) == 0 {
-		return true, createSwitchoverAction(dag, csSet, pods)
+		return true, createSwitchoverAction(dag, rsm, pods)
 	}
 
 	// switch on the action status if one is found:
 	// 1. succeed means the action executed successfully,
-	// but the consensus cluster may have false positive(apecloud-mysql only?),
+	// but some kinds of cluster may report a false positive (apecloud-mysql only?),
 	// we can't wait forever, update is more important.
 	// do the next pod update stage
 	// 2. failed means the action execution failed,
-	// but this doesn't mean the consensus cluster didn't switchover(again, apecloud-mysql only?)
+	// but this doesn't mean the cluster didn't switch over (again, apecloud-mysql only?)
 	// we can't do anything either in this situation, emit a failed event and
 	// do the next pod update stage
 	// 3.
in progress means action still running, @@ -138,29 +140,29 @@ func doSwitchoverIfNeeded(transCtx *CSSetTransformContext, dag *graph.DAG, pods return false, nil } -func createSwitchoverAction(dag *graph.DAG, csSet *workloads.ConsensusSet, pods []corev1.Pod) error { - leader := getLeaderPodName(csSet.Status.MembersStatus) - targetOrdinal := selectSwitchoverTarget(csSet, pods) - target := getPodName(csSet.Name, targetOrdinal) +func createSwitchoverAction(dag *graph.DAG, rsm *workloads.ReplicatedStateMachine, pods []corev1.Pod) error { + leader := getLeaderPodName(rsm.Status.MembersStatus) + targetOrdinal := selectSwitchoverTarget(rsm, pods) + target := getPodName(rsm.Name, targetOrdinal) actionType := jobTypeSwitchover ordinal, _ := getPodOrdinal(leader) - actionName := getActionName(csSet.Name, int(csSet.Generation), ordinal, actionType) - action := buildAction(csSet, actionName, actionType, jobScenarioUpdate, leader, target) + actionName := getActionName(rsm.Name, int(rsm.Generation), ordinal, actionType) + action := buildAction(rsm, actionName, actionType, jobScenarioUpdate, leader, target) // don't do cluster abnormal status analysis, prefer faster update process - return createAction(dag, csSet, action) + return createAction(dag, rsm, action) } -func selectSwitchoverTarget(csSet *workloads.ConsensusSet, pods []corev1.Pod) int { +func selectSwitchoverTarget(rsm *workloads.ReplicatedStateMachine, pods []corev1.Pod) int { var podUpdated, podUpdatedWithLabel string for _, pod := range pods { - if intctrlutil.GetPodRevision(&pod) != csSet.Status.UpdateRevision { + if intctrlutil.GetPodRevision(&pod) != rsm.Status.UpdateRevision { continue } if len(podUpdated) == 0 { podUpdated = pod.Name } - if _, ok := pod.Labels[model.RoleLabelKey]; !ok { + if _, ok := pod.Labels[roleLabelKey]; !ok { continue } if len(podUpdatedWithLabel) == 0 { @@ -181,8 +183,8 @@ func selectSwitchoverTarget(csSet *workloads.ConsensusSet, pods []corev1.Pod) in return ordinal } -func shouldSwitchover(csSet *workloads.ConsensusSet, podsToBeUpdated []*corev1.Pod) bool { - leaderName := getLeaderPodName(csSet.Status.MembersStatus) +func shouldSwitchover(rsm *workloads.ReplicatedStateMachine, podsToBeUpdated []*corev1.Pod) bool { + leaderName := getLeaderPodName(rsm.Status.MembersStatus) for _, pod := range podsToBeUpdated { if pod.Name == leaderName { return true @@ -190,5 +192,3 @@ func shouldSwitchover(csSet *workloads.ConsensusSet, podsToBeUpdated []*corev1.P } return false } - -var _ graph.Transformer = &UpdateStrategyTransformer{} diff --git a/internal/controller/rsm/transformer_update_strategy_test.go b/internal/controller/rsm/transformer_update_strategy_test.go new file mode 100644 index 00000000000..035b00d2eaf --- /dev/null +++ b/internal/controller/rsm/transformer_update_strategy_test.go @@ -0,0 +1,266 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
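selectSwitchoverTarget in the hunks above prefers a pod that is both updated to rsm.Status.UpdateRevision and already carrying a role label; the function's final fallback lies outside the hunk. A self-contained sketch of that selection under those assumptions (pod names and the pickTarget helper are hypothetical):

package main

import "fmt"

type pod struct {
	name, revision, role string // role == "" means the label has not been probed yet
}

// pickTarget mirrors the selection loop in the hunk above, simplified: prefer
// the first updated pod that already has a role label, else the first updated
// pod. The real code's trailing fallback is not shown in the diff.
func pickTarget(pods []pod, updateRevision string) string {
	var updated, updatedWithRole string
	for _, p := range pods {
		if p.revision != updateRevision {
			continue
		}
		if updated == "" {
			updated = p.name
		}
		if p.role != "" && updatedWithRole == "" {
			updatedWithRole = p.name
		}
	}
	if updatedWithRole != "" {
		return updatedWithRole
	}
	return updated
}

func main() {
	pods := []pod{
		{"foo-0", "new", "follower"},
		{"foo-1", "old", "leader"}, // pending leader update is what triggers switchover
		{"foo-2", "new", ""},
	}
	fmt.Println(pickTarget(pods, "new")) // foo-0: updated AND already serving a role
}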
+*/ + +package rsm + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/golang/mock/gomock" + apps "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/builder" + "github.com/apecloud/kubeblocks/internal/controller/graph" + "github.com/apecloud/kubeblocks/internal/controller/model" +) + +var _ = Describe("update strategy transformer test.", func() { + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + SetUID(uid). + SetReplicas(3). + SetRoles(roles). + SetService(service). + GetObject() + rsm.Status.UpdateRevision = newRevision + membersStatus := []workloads.MemberStatus{ + { + PodName: getPodName(rsm.Name, 1), + ReplicaRole: workloads.ReplicaRole{Name: "leader", IsLeader: true}, + }, + { + PodName: getPodName(rsm.Name, 0), + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + { + PodName: getPodName(rsm.Name, 2), + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + } + rsm.Status.MembersStatus = membersStatus + + transCtx = &rsmTransformContext{ + Context: ctx, + Client: k8sMock, + EventRecorder: nil, + Logger: logger, + rsmOrig: rsm.DeepCopy(), + rsm: rsm, + } + + dag = mockDAG() + transformer = &UpdateStrategyTransformer{} + }) + + Context("RSM is not in status updating", func() { + It("should return directly", func() { + transCtx.rsmOrig.Generation = 2 + transCtx.rsmOrig.Status.ObservedGeneration = 1 + dagExpected := graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) + + Context("the underlying sts is not ready", func() { + It("should return directly", func() { + transCtx.rsmOrig.Generation = 2 + transCtx.rsmOrig.Status.ObservedGeneration = 2 + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &apps.StatefulSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *apps.StatefulSet, _ ...client.GetOption) error { + Expect(obj).ShouldNot(BeNil()) + obj.Namespace = objKey.Namespace + obj.Name = objKey.Name + obj.Generation = 2 + obj.Status.ObservedGeneration = 1 + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.PodList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.PodList, _ ...client.ListOption) error { + return nil + }).Times(1) + dagExpected := graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) + + Context("pods are not ready", func() { + It("should return directly", func() { + transCtx.rsmOrig.Generation = 2 + transCtx.rsmOrig.Status.ObservedGeneration = 2 + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &apps.StatefulSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *apps.StatefulSet, _ ...client.GetOption) error { + Expect(obj).ShouldNot(BeNil()) + obj.Namespace = objKey.Namespace + obj.Name = objKey.Name + obj.Generation = 2 + obj.Status.ObservedGeneration = obj.Generation + obj.Spec.Replicas = &rsm.Spec.Replicas + return nil + }).Times(1) + k8sMock.EXPECT(). 
+ List(gomock.Any(), &corev1.PodList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.PodList, _ ...client.ListOption) error { + return nil + }).Times(1) + dagExpected := graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) + + Context("all ready for updating", func() { + It("should update all pods", func() { + transCtx.rsmOrig.Generation = 2 + transCtx.rsmOrig.Status.ObservedGeneration = 2 + rsm.Spec.UpdateStrategy = workloads.SerialUpdateStrategy + k8sMock.EXPECT(). + Get(gomock.Any(), gomock.Any(), &apps.StatefulSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *apps.StatefulSet, _ ...client.GetOption) error { + Expect(obj).ShouldNot(BeNil()) + obj.Namespace = objKey.Namespace + obj.Name = objKey.Name + obj.Generation = 2 + obj.Status.ObservedGeneration = obj.Generation + obj.Spec.Replicas = &rsm.Spec.Replicas + return nil + }).Times(4) + pod0 := builder.NewPodBuilder(namespace, getPodName(rsm.Name, 0)). + AddLabels(roleLabelKey, "follower"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + pod1 := builder.NewPodBuilder(namespace, getPodName(name, 1)). + AddLabels(roleLabelKey, "leader"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + pod2 := builder.NewPodBuilder(namespace, getPodName(name, 2)). + AddLabels(roleLabelKey, "follower"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.PodList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.PodList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []corev1.Pod{*pod0, *pod1, *pod2} + return nil + }).Times(1) + + By("update the first pod") + dagExpected := graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + model.PrepareDelete(dagExpected, pod0) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("update the second pod") + makePodUpdateReady(newRevision, pod0) + dagExpected = graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + model.PrepareDelete(dagExpected, pod2) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.PodList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.PodList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []corev1.Pod{*pod0, *pod1, *pod2} + return nil + }).Times(1) + dag = graph.NewDAG() + model.PrepareStatus(dag, transCtx.rsmOrig, transCtx.rsm) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("switchover") + makePodUpdateReady(newRevision, pod2) + dagExpected = graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + actionName := getActionName(rsm.Name, int(rsm.Generation), 1, jobTypeSwitchover) + action := builder.NewJobBuilder(name, actionName).GetObject() + model.PrepareCreate(dagExpected, action) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.PodList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.PodList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []corev1.Pod{*pod0, *pod1, *pod2} + return nil + }).Times(1) + k8sMock.EXPECT(). 
+ List(gomock.Any(), &batchv1.JobList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *batchv1.JobList, _ ...client.ListOption) error { + return nil + }).Times(1) + dag = graph.NewDAG() + model.PrepareStatus(dag, transCtx.rsmOrig, transCtx.rsm) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + + By("update the last(leader) pod") + dagExpected = graph.NewDAG() + model.PrepareStatus(dagExpected, transCtx.rsmOrig, transCtx.rsm) + action = builder.NewJobBuilder(name, actionName). + AddLabelsInMap(map[string]string{ + constant.AppInstanceLabelKey: rsm.Name, + constant.KBManagedByKey: kindReplicatedStateMachine, + jobScenarioLabel: jobScenarioUpdate, + jobTypeLabel: jobTypeSwitchover, + jobHandledLabel: jobHandledFalse, + }). + SetSuspend(false). + GetObject() + action.Status.Succeeded = 1 + model.PrepareUpdate(dagExpected, action, action) + model.PrepareDelete(dagExpected, pod1) + k8sMock.EXPECT(). + List(gomock.Any(), &corev1.PodList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *corev1.PodList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []corev1.Pod{*pod0, *pod1, *pod2} + return nil + }).Times(1) + k8sMock.EXPECT(). + List(gomock.Any(), &batchv1.JobList{}, gomock.Any()). + DoAndReturn(func(_ context.Context, list *batchv1.JobList, _ ...client.ListOption) error { + Expect(list).ShouldNot(BeNil()) + list.Items = []batchv1.Job{*action} + return nil + }).Times(1) + dag = graph.NewDAG() + model.PrepareStatus(dag, transCtx.rsmOrig, transCtx.rsm) + Expect(transformer.Transform(transCtx, dag)).Should(Succeed()) + Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controller/consensusset/types.go b/internal/controller/rsm/types.go similarity index 61% rename from internal/controller/consensusset/types.go rename to internal/controller/rsm/types.go index e16e8a994b5..a305d88f76d 100644 --- a/internal/controller/consensusset/types.go +++ b/internal/controller/rsm/types.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
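The tests above drive switchover through action Jobs tracked purely by labels, which the types.go hunk that follows renames to the rsm.workloads.kubeblocks.io/* prefix. A minimal sketch of listing unhandled actions with those keys; the literal values standing in for constant.AppInstanceLabelKey and constant.KBManagedByKey are assumptions:

package rsmsketch

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// pendingActions lists action Jobs for one rsm that no transformer has
// marked handled yet. Sketch only; wiring of the client is assumed.
func pendingActions(ctx context.Context, c client.Client, rsmName, scenario string) ([]batchv1.Job, error) {
	var jobs batchv1.JobList
	err := c.List(ctx, &jobs, client.MatchingLabels{
		"app.kubernetes.io/instance":               rsmName,                  // constant.AppInstanceLabelKey (assumed value)
		"apps.kubeblocks.io/managed-by":            "ReplicatedStateMachine", // constant.KBManagedByKey (assumed value)
		"rsm.workloads.kubeblocks.io/job-scenario": scenario,
		"rsm.workloads.kubeblocks.io/job-handled":  "false",
	})
	return jobs.Items, err
}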
*/ -package consensusset +package rsm import ( "context" @@ -31,15 +31,18 @@ import ( ) const ( - kindConsensusSet = "ConsensusSet" + kindReplicatedStateMachine = "ReplicatedStateMachine" + + roleLabelKey = "kubeblocks.io/role" + rsmAccessModeLabelKey = "rsm.apps.kubeblocks.io/access-mode" defaultPodName = "Unknown" - csSetFinalizerName = "cs.workloads.kubeblocks.io/finalizer" + rsmFinalizerName = "rsm.workloads.kubeblocks.io/finalizer" - jobHandledLabel = "cs.workloads.kubeblocks.io/job-handled" - jobTypeLabel = "cs.workloads.kubeblocks.io/job-type" - jobScenarioLabel = "cs.workloads.kubeblocks.io/job-scenario" + jobHandledLabel = "rsm.workloads.kubeblocks.io/job-handled" + jobTypeLabel = "rsm.workloads.kubeblocks.io/job-type" + jobScenarioLabel = "rsm.workloads.kubeblocks.io/job-scenario" jobHandledTrue = "true" jobHandledFalse = "false" jobTypeSwitchover = "switchover" @@ -58,43 +61,43 @@ const ( shell2httpImage = "msoap/shell2http:1.16.0" shell2httpBinaryPath = "/app/shell2http" shell2httpServePath = "/role" - defaultRoleObservationImage = "apecloud/kubeblocks-role-observation:latest" - defaultRoleObservationDaemonPort = 3501 - roleObservationURIFormat = "http://localhost:%s/getRole" + defaultRoleObservationImage = "apecloud/kubeblocks-role-agent:latest" + defaultRoleObservationDaemonPort = 7373 + roleObservationURIFormat = "-addr=localhost:%s" defaultActionImage = "busybox:latest" - usernameCredentialVarName = "KB_CONSENSUS_SET_USERNAME" - passwordCredentialVarName = "KB_CONSENSUS_SET_PASSWORD" - servicePortVarName = "KB_CONSENSUS_SET_SERVICE_PORT" - actionSvcListVarName = "KB_CONSENSUS_SET_ACTION_SVC_LIST" - leaderHostVarName = "KB_CONSENSUS_SET_LEADER_HOST" - targetHostVarName = "KB_CONSENSUS_SET_TARGET_HOST" + usernameCredentialVarName = "KB_RSM_USERNAME" + passwordCredentialVarName = "KB_RSM_PASSWORD" + servicePortVarName = "KB_RSM_SERVICE_PORT" + actionSvcListVarName = "KB_RSM_ACTION_SVC_LIST" + leaderHostVarName = "KB_RSM_LEADER_HOST" + targetHostVarName = "KB_RSM_TARGET_HOST" roleObservationEventFieldPath = "spec.containers{" + roleObservationName + "}" actionSvcPortBase = int32(36500) ) -type CSSetTransformContext struct { +type rsmTransformContext struct { context.Context Client roclient.ReadonlyClient record.EventRecorder logr.Logger - CSSet *workloads.ConsensusSet - OrigCSSet *workloads.ConsensusSet + rsm *workloads.ReplicatedStateMachine + rsmOrig *workloads.ReplicatedStateMachine } -func (c *CSSetTransformContext) GetContext() context.Context { +func (c *rsmTransformContext) GetContext() context.Context { return c.Context } -func (c *CSSetTransformContext) GetClient() roclient.ReadonlyClient { +func (c *rsmTransformContext) GetClient() roclient.ReadonlyClient { return c.Client } -func (c *CSSetTransformContext) GetRecorder() record.EventRecorder { +func (c *rsmTransformContext) GetRecorder() record.EventRecorder { return c.EventRecorder } -func (c *CSSetTransformContext) GetLogger() logr.Logger { +func (c *rsmTransformContext) GetLogger() logr.Logger { return c.Logger } -var _ graph.TransformContext = &CSSetTransformContext{} +var _ graph.TransformContext = &rsmTransformContext{} diff --git a/internal/controller/consensusset/update_plan.go b/internal/controller/rsm/update_plan.go similarity index 88% rename from internal/controller/consensusset/update_plan.go rename to internal/controller/rsm/update_plan.go index d689949137f..5ac26fe1aee 100644 --- a/internal/controller/consensusset/update_plan.go +++ b/internal/controller/rsm/update_plan.go @@ -17,7 +17,7 @@ You should 
have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package rsm import ( "errors" @@ -39,12 +39,14 @@ type updatePlan interface { } type realUpdatePlan struct { - csSet workloads.ConsensusSet + rsm workloads.ReplicatedStateMachine pods []corev1.Pod dag *graph.DAG podsToBeUpdated []*corev1.Pod } +var _ updatePlan = &realUpdatePlan{} + var ( ErrContinue error ErrWait = errors.New("wait") @@ -69,7 +71,7 @@ func (p *realUpdatePlan) planWalkFunc(vertex graph.Vertex) error { } // if pod is the latest version, we do nothing - if intctrlutil.GetPodRevision(pod) == p.csSet.Status.UpdateRevision { + if intctrlutil.GetPodRevision(pod) == p.rsm.Status.UpdateRevision { if intctrlutil.PodIsReadyWithLabel(*pod) { return ErrContinue } else { @@ -88,11 +90,11 @@ func (p *realUpdatePlan) build() { root := &model.ObjectVertex{} p.dag.AddVertex(root) - rolePriorityMap := composeRolePriorityMap(p.csSet) + rolePriorityMap := composeRolePriorityMap(p.rsm) sortPods(p.pods, rolePriorityMap, false) // generate plan by UpdateStrategy - switch p.csSet.Spec.UpdateStrategy { + switch p.rsm.Spec.UpdateStrategy { case workloads.SerialUpdateStrategy: p.buildSerialUpdatePlan() case workloads.ParallelUpdateStrategy: @@ -150,8 +152,9 @@ func (p *realUpdatePlan) buildBestEffortParallelUpdatePlan(rolePriorityMap map[s // append leader podList = podList[end:] - for _, pod := range podList { - vertex := &model.ObjectVertex{Obj: &pod} + end = len(podList) + for i := 0; i < end; i++ { + vertex := &model.ObjectVertex{Obj: &podList[i]} p.dag.AddConnect(preVertex, vertex) } } @@ -159,8 +162,8 @@ func (p *realUpdatePlan) buildBestEffortParallelUpdatePlan(rolePriorityMap map[s // unknown & empty & leader & followers & learner func (p *realUpdatePlan) buildParallelUpdatePlan() { root, _ := model.FindRootVertex(p.dag) - for _, pod := range p.pods { - vertex := &model.ObjectVertex{Obj: &pod} + for i := range p.pods { + vertex := &model.ObjectVertex{Obj: &p.pods[i]} p.dag.AddConnect(root, vertex) } } @@ -168,8 +171,8 @@ func (p *realUpdatePlan) buildParallelUpdatePlan() { // unknown -> empty -> learner -> followers(none->readonly->readwrite) -> leader func (p *realUpdatePlan) buildSerialUpdatePlan() { preVertex, _ := model.FindRootVertex(p.dag) - for _, pod := range p.pods { - vertex := &model.ObjectVertex{Obj: &pod} + for i := range p.pods { + vertex := &model.ObjectVertex{Obj: &p.pods[i]} p.dag.AddConnect(preVertex, vertex) preVertex = vertex } @@ -184,12 +187,10 @@ func (p *realUpdatePlan) execute() ([]*corev1.Pod, error) { return p.podsToBeUpdated, nil } -func newUpdatePlan(csSet workloads.ConsensusSet, pods []corev1.Pod) updatePlan { +func newUpdatePlan(rsm workloads.ReplicatedStateMachine, pods []corev1.Pod) updatePlan { return &realUpdatePlan{ - csSet: csSet, - pods: pods, - dag: graph.NewDAG(), + rsm: rsm, + pods: pods, + dag: graph.NewDAG(), } } - -var _ updatePlan = &realUpdatePlan{} diff --git a/internal/controller/rsm/update_plan_test.go b/internal/controller/rsm/update_plan_test.go new file mode 100644 index 00000000000..b7d8c1aee24 --- /dev/null +++ b/internal/controller/rsm/update_plan_test.go @@ -0,0 +1,157 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
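The indexed loops introduced in the update_plan.go hunks above fix Go's pre-1.22 range-variable aliasing: `&pod` took the address of a single reused loop variable, so every DAG vertex ended up pointing at the last pod. A standalone demonstration, assuming a toolchain before Go 1.22 as this 2023 codebase would use:

package main

import "fmt"

func main() {
	pods := []string{"pod-0", "pod-1", "pod-2"}

	var aliased []*string
	for _, pod := range pods { // one reused variable before Go 1.22
		aliased = append(aliased, &pod) // every element stores the same address
	}

	var fixed []*string
	for i := range pods {
		fixed = append(fixed, &pods[i]) // address of the slice element itself
	}

	fmt.Println(*aliased[0], *aliased[1], *aliased[2]) // pod-2 pod-2 pod-2
	fmt.Println(*fixed[0], *fixed[1], *fixed[2])       // pod-0 pod-1 pod-2
}

Go 1.22 later changed range-variable scoping, but taking &pods[i] stays correct under both semantics, which makes it the safer idiom here.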
+ +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/controller/builder" +) + +var _ = Describe("update plan test.", func() { + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name).SetRoles(roles).GetObject() + rsm.Status.UpdateRevision = newRevision + }) + + Context("plan build&execute", func() { + var pod0, pod1, pod2, pod3, pod4, pod5, pod6 *corev1.Pod + + resetPods := func() { + pod0 = builder.NewPodBuilder(namespace, getPodName(name, 0)). + AddLabels(roleLabelKey, "follower"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + + pod1 = builder.NewPodBuilder(namespace, getPodName(name, 1)). + AddLabels(roleLabelKey, "logger"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + + pod2 = builder.NewPodBuilder(namespace, getPodName(name, 2)). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + + pod3 = builder.NewPodBuilder(namespace, getPodName(name, 3)). + AddLabels(roleLabelKey, "learner"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + + pod4 = builder.NewPodBuilder(namespace, getPodName(name, 4)). + AddLabels(roleLabelKey, "candidate"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + + pod5 = builder.NewPodBuilder(namespace, getPodName(name, 5)). + AddLabels(roleLabelKey, "leader"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + + pod6 = builder.NewPodBuilder(namespace, getPodName(name, 6)). + AddLabels(roleLabelKey, "learner"). + AddLabels(apps.StatefulSetRevisionLabel, oldRevision). + GetObject() + } + + buildPodList := func() []corev1.Pod { + return []corev1.Pod{*pod0, *pod1, *pod2, *pod3, *pod4, *pod5, *pod6} + } + + toPodList := func(pods []*corev1.Pod) []corev1.Pod { + var list []corev1.Pod + for _, pod := range pods { + list = append(list, *pod) + } + return list + } + + equalPodList := func(podList1, podList2 []corev1.Pod) bool { + set1 := sets.New[string]() + set2 := sets.New[string]() + for _, pod := range podList1 { + set1.Insert(pod.Name) + } + for _, pod := range podList2 { + set2.Insert(pod.Name) + } + return set1.Equal(set2) + } + + checkPlan := func(expectedPlan [][]*corev1.Pod) { + for i, expectedPods := range expectedPlan { + if i > 0 { + makePodUpdateReady(newRevision, expectedPlan[i-1]...) 
+ } + pods := buildPodList() + plan := newUpdatePlan(*rsm, pods) + podUpdateList, err := plan.execute() + Expect(err).Should(BeNil()) + podList := toPodList(podUpdateList) + expectedPodList := toPodList(expectedPods) + Expect(equalPodList(podList, expectedPodList)).Should(BeTrue()) + } + } + + BeforeEach(func() { + resetPods() + }) + + It("should work well in a serial plan", func() { + By("build a serial plan") + expectedPlan := [][]*corev1.Pod{ + {pod4}, + {pod2}, + {pod3}, + {pod6}, + {pod1}, + {pod0}, + {pod5}, + } + checkPlan(expectedPlan) + }) + + It("should work well in a parallel plan", func() { + By("build a parallel plan") + rsm.Spec.UpdateStrategy = workloads.ParallelUpdateStrategy + expectedPlan := [][]*corev1.Pod{ + {pod0, pod1, pod2, pod3, pod4, pod5, pod6}, + } + checkPlan(expectedPlan) + }) + + It("should work well in a best effort parallel", func() { + By("build a best effort parallel plan") + rsm.Spec.UpdateStrategy = workloads.BestEffortParallelUpdateStrategy + expectedPlan := [][]*corev1.Pod{ + {pod2, pod3, pod4, pod6}, + {pod1}, + {pod0}, + {pod5}, + } + checkPlan(expectedPlan) + }) + }) +}) diff --git a/internal/controller/consensusset/utils.go b/internal/controller/rsm/utils.go similarity index 73% rename from internal/controller/consensusset/utils.go rename to internal/controller/rsm/utils.go index 3e09f32ed83..97baf60673c 100644 --- a/internal/controller/consensusset/utils.go +++ b/internal/controller/rsm/utils.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package consensusset +package rsm import ( "context" @@ -27,6 +27,7 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -74,7 +75,7 @@ func sortPods(pods []corev1.Pod, rolePriorityMap map[string]int, reverse bool) { sortMembers(pods, rolePriorityMap, getRoleFunc, getOrdinalFunc, reverse) } -func sortMembersStatus(membersStatus []workloads.ConsensusMemberStatus, rolePriorityMap map[string]int) { +func sortMembersStatus(membersStatus []workloads.MemberStatus, rolePriorityMap map[string]int) { getRoleFunc := func(i int) string { return membersStatus[i].Name } @@ -110,10 +111,10 @@ func sortMembers[T any](membersStatus []T, } // composeRolePriorityMap generates a priority map based on roles. 
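The serial-plan expectations in the tests above (candidate first, then the role-less pod, learners, logger, follower, leader last) imply an ascending priority order produced by composeRolePriorityMap, the function that follows; its constants sit outside these hunks. Assumed values consistent with that ordering:

package rsmsketch

// Assumption: concrete values chosen only to reproduce the test ordering;
// the real constants live elsewhere in the rsm package.
const (
	unknownPriority           = 0   // role not declared in spec.roles (e.g. "candidate")
	emptyPriority             = 1   // pod carries no role label yet
	learnerPriority           = 10  // "learner"
	followerReadonlyPriority  = 20  // e.g. "logger"
	followerReadWritePriority = 30  // e.g. "follower"
	leaderPriority            = 100 // updated last, behind a switchover
)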
-func composeRolePriorityMap(set workloads.ConsensusSet) map[string]int { +func composeRolePriorityMap(rsm workloads.ReplicatedStateMachine) map[string]int { rolePriorityMap := make(map[string]int, 0) rolePriorityMap[""] = emptyPriority - for _, role := range set.Spec.Roles { + for _, role := range rsm.Spec.Roles { roleName := strings.ToLower(role.Name) switch { case role.IsLeader: @@ -138,10 +139,10 @@ func composeRolePriorityMap(set workloads.ConsensusSet) map[string]int { // updatePodRoleLabel updates pod role label when internal container role changed func updatePodRoleLabel(cli client.Client, reqCtx intctrlutil.RequestCtx, - set workloads.ConsensusSet, + rsm workloads.ReplicatedStateMachine, pod *corev1.Pod, roleName string) error { ctx := reqCtx.Ctx - roleMap := composeRoleMap(set) + roleMap := composeRoleMap(rsm) // role not defined in CR, ignore it roleName = strings.ToLower(roleName) @@ -150,27 +151,27 @@ func updatePodRoleLabel(cli client.Client, role, ok := roleMap[roleName] switch ok { case true: - pod.Labels[model.RoleLabelKey] = role.Name - pod.Labels[model.ConsensusSetAccessModeLabelKey] = string(role.AccessMode) + pod.Labels[roleLabelKey] = role.Name + pod.Labels[rsmAccessModeLabelKey] = string(role.AccessMode) case false: - delete(pod.Labels, model.RoleLabelKey) - delete(pod.Labels, model.ConsensusSetAccessModeLabelKey) + delete(pod.Labels, roleLabelKey) + delete(pod.Labels, rsmAccessModeLabelKey) } return cli.Patch(ctx, pod, patch) } -func composeRoleMap(set workloads.ConsensusSet) map[string]workloads.ConsensusRole { - roleMap := make(map[string]workloads.ConsensusRole, 0) - for _, role := range set.Spec.Roles { +func composeRoleMap(rsm workloads.ReplicatedStateMachine) map[string]workloads.ReplicaRole { + roleMap := make(map[string]workloads.ReplicaRole, 0) + for _, role := range rsm.Spec.Roles { roleMap[strings.ToLower(role.Name)] = role } return roleMap } -func setMembersStatus(set *workloads.ConsensusSet, pods []corev1.Pod) { +func setMembersStatus(rsm *workloads.ReplicatedStateMachine, pods []corev1.Pod) { // compose new status - newMembersStatus := make([]workloads.ConsensusMemberStatus, 0) - roleMap := composeRoleMap(*set) + newMembersStatus := make([]workloads.MemberStatus, 0) + roleMap := composeRoleMap(*rsm) for _, pod := range pods { if !intctrlutil.PodIsReadyWithLabel(pod) { continue @@ -180,37 +181,38 @@ func setMembersStatus(set *workloads.ConsensusSet, pods []corev1.Pod) { if !ok { continue } - memberStatus := workloads.ConsensusMemberStatus{ - PodName: pod.Name, - ConsensusRole: role, + memberStatus := workloads.MemberStatus{ + PodName: pod.Name, + ReplicaRole: role, } newMembersStatus = append(newMembersStatus, memberStatus) } // members(pods) being scheduled should be kept - oldMemberMap := make(map[string]*workloads.ConsensusMemberStatus, len(set.Status.MembersStatus)) - for i, status := range set.Status.MembersStatus { - oldMemberMap[status.PodName] = &set.Status.MembersStatus[i] + oldMemberMap := make(map[string]*workloads.MemberStatus, len(rsm.Status.MembersStatus)) + for i, status := range rsm.Status.MembersStatus { + oldMemberMap[status.PodName] = &rsm.Status.MembersStatus[i] } - newMemberMap := make(map[string]*workloads.ConsensusMemberStatus, len(newMembersStatus)) + newMemberMap := make(map[string]*workloads.MemberStatus, len(newMembersStatus)) for i, status := range newMembersStatus { newMemberMap[status.PodName] = &newMembersStatus[i] } oldMemberSet := sets.KeySet(oldMemberMap) newMemberSet := sets.KeySet(newMemberMap) memberToKeepSet := 
oldMemberSet.Difference(newMemberSet)
+	// TODO(free6om): handle stale role in memberToKeepSet
 	for podName := range memberToKeepSet {
 		ordinal, _ := getPodOrdinal(podName)
 		// members have left because of scale-in
-		if ordinal >= int(set.Spec.Replicas) {
+		if ordinal >= int(rsm.Spec.Replicas) {
 			continue
 		}
 		newMembersStatus = append(newMembersStatus, *oldMemberMap[podName])
 	}

-	rolePriorityMap := composeRolePriorityMap(*set)
+	rolePriorityMap := composeRolePriorityMap(*rsm)
 	sortMembersStatus(newMembersStatus, rolePriorityMap)
-	set.Status.MembersStatus = newMembersStatus
+	rsm.Status.MembersStatus = newMembersStatus
 }

 func getRoleName(pod corev1.Pod) string {
@@ -238,8 +240,8 @@ func getPodsOfStatefulSet(ctx context.Context, cli roclient.ReadonlyClient, stsO
 	if err := cli.List(ctx, podList, &client.ListOptions{Namespace: stsObj.Namespace},
 		client.MatchingLabels{
-			model.KBManagedByKey:      stsObj.Labels[model.KBManagedByKey],
-			model.AppInstanceLabelKey: stsObj.Labels[model.AppInstanceLabelKey],
+			constant.KBManagedByKey:      stsObj.Labels[constant.KBManagedByKey],
+			constant.AppInstanceLabelKey: stsObj.Labels[constant.AppInstanceLabelKey],
 		}); err != nil {
 		return nil, err
 	}
@@ -252,13 +254,13 @@ func getPodsOfStatefulSet(ctx context.Context, cli roclient.ReadonlyClient, stsO
 	return pods, nil
 }

-func getHeadlessSvcName(set workloads.ConsensusSet) string {
-	return strings.Join([]string{set.Name, "headless"}, "-")
+func getHeadlessSvcName(rsm workloads.ReplicatedStateMachine) string {
+	return strings.Join([]string{rsm.Name, "headless"}, "-")
 }

-func findSvcPort(csSet workloads.ConsensusSet) int {
-	port := csSet.Spec.Service.Ports[0]
-	for _, c := range csSet.Spec.Template.Spec.Containers {
+func findSvcPort(rsm workloads.ReplicatedStateMachine) int {
+	port := rsm.Spec.Service.Ports[0]
+	for _, c := range rsm.Spec.Template.Spec.Containers {
 		for _, p := range c.Ports {
 			if port.TargetPort.Type == intstr.String && p.Name == port.TargetPort.StrVal ||
 				port.TargetPort.Type == intstr.Int && p.ContainerPort == port.TargetPort.IntVal {
@@ -269,13 +271,13 @@ func findSvcPort(csSet workloads.ConsensusSet) int {
 	return 0
 }

-func getActionList(transCtx *CSSetTransformContext, actionScenario string) ([]*batchv1.Job, error) {
+func getActionList(transCtx *rsmTransformContext, actionScenario string) ([]*batchv1.Job, error) {
 	var actionList []*batchv1.Job
 	ml := client.MatchingLabels{
-		model.AppInstanceLabelKey: transCtx.CSSet.Name,
-		model.KBManagedByKey:      kindConsensusSet,
-		jobScenarioLabel:          actionScenario,
-		jobHandledLabel:           jobHandledFalse,
+		constant.AppInstanceLabelKey: transCtx.rsm.Name,
+		constant.KBManagedByKey:      kindReplicatedStateMachine,
+		jobScenarioLabel:             actionScenario,
+		jobHandledLabel:              jobHandledFalse,
 	}
 	jobList := &batchv1.JobList{}
 	if err := transCtx.Client.List(transCtx.Context, jobList, ml); err != nil {
@@ -288,6 +290,15 @@ func getActionList(transCtx *CSSetTransformContext, actionScenario string) ([]*b
 	return actionList, nil
 }

+// TODO(free6om): remove all printActionList when all tests pass
+func printActionList(logger logr.Logger, actionList []*batchv1.Job) {
+	var actionNameList []string
+	for _, action := range actionList {
+		actionNameList = append(actionNameList, fmt.Sprintf("%s-%v", action.Name, *action.Spec.Suspend))
+	}
+	logger.Info(fmt.Sprintf("action list: %v\n", actionNameList))
+}
+
 func getPodName(parent string, ordinal int) string {
 	return fmt.Sprintf("%s-%d", parent, ordinal)
 }
@@ -296,7 +307,7 @@ func getActionName(parent string, generation, ordinal int, actionType string) st
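+	// e.g. "bar-1-2-switchover": RSM "bar", generation 1, ordinal 2, switchover action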
return fmt.Sprintf("%s-%d-%d-%s", parent, generation, ordinal, actionType) } -func getLeaderPodName(membersStatus []workloads.ConsensusMemberStatus) string { +func getLeaderPodName(membersStatus []workloads.MemberStatus) string { for _, memberStatus := range membersStatus { if memberStatus.IsLeader { return memberStatus.PodName @@ -314,20 +325,20 @@ func getPodOrdinal(podName string) (int, error) { } // ordinal is the ordinal of pod which this action apply to -func createAction(dag *graph.DAG, csSet *workloads.ConsensusSet, action *batchv1.Job) error { - if err := intctrlutil.SetOwnership(csSet, action, model.GetScheme(), csSetFinalizerName); err != nil { +func createAction(dag *graph.DAG, rsm *workloads.ReplicatedStateMachine, action *batchv1.Job) error { + if err := intctrlutil.SetOwnership(rsm, action, model.GetScheme(), rsmFinalizerName); err != nil { return err } model.PrepareCreate(dag, action) return nil } -func buildAction(csSet *workloads.ConsensusSet, actionName, actionType, actionScenario string, leader, target string) *batchv1.Job { - env := buildActionEnv(csSet, leader, target) - template := buildActionPodTemplate(csSet, env, actionType) - return builder.NewJobBuilder(csSet.Namespace, actionName). - AddLabels(model.AppInstanceLabelKey, csSet.Name). - AddLabels(model.KBManagedByKey, kindConsensusSet). +func buildAction(rsm *workloads.ReplicatedStateMachine, actionName, actionType, actionScenario string, leader, target string) *batchv1.Job { + env := buildActionEnv(rsm, leader, target) + template := buildActionPodTemplate(rsm, env, actionType) + return builder.NewJobBuilder(rsm.Namespace, actionName). + AddLabels(constant.AppInstanceLabelKey, rsm.Name). + AddLabels(constant.KBManagedByKey, kindReplicatedStateMachine). AddLabels(jobScenarioLabel, actionScenario). AddLabels(jobTypeLabel, actionType). AddLabels(jobHandledLabel, jobHandledFalse). @@ -336,8 +347,8 @@ func buildAction(csSet *workloads.ConsensusSet, actionName, actionType, actionSc GetObject() } -func buildActionPodTemplate(csSet *workloads.ConsensusSet, env []corev1.EnvVar, actionType string) *corev1.PodTemplateSpec { - credential := csSet.Spec.Credential +func buildActionPodTemplate(rsm *workloads.ReplicatedStateMachine, env []corev1.EnvVar, actionType string) *corev1.PodTemplateSpec { + credential := rsm.Spec.Credential credentialEnv := make([]corev1.EnvVar, 0) if credential != nil { credentialEnv = append(credentialEnv, @@ -353,7 +364,7 @@ func buildActionPodTemplate(csSet *workloads.ConsensusSet, env []corev1.EnvVar, }) } env = append(env, credentialEnv...) 
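+	// findActionImage falls back to defaultActionImage when no image is declared
+	// for this action type in the reconfiguration spec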
- reconfiguration := csSet.Spec.MembershipReconfiguration + reconfiguration := rsm.Spec.MembershipReconfiguration image := findActionImage(reconfiguration, actionType) command := getActionCommand(reconfiguration, actionType) container := corev1.Container{ @@ -372,11 +383,11 @@ func buildActionPodTemplate(csSet *workloads.ConsensusSet, env []corev1.EnvVar, return template } -func buildActionEnv(csSet *workloads.ConsensusSet, leader, target string) []corev1.EnvVar { - svcName := getHeadlessSvcName(*csSet) +func buildActionEnv(rsm *workloads.ReplicatedStateMachine, leader, target string) []corev1.EnvVar { + svcName := getHeadlessSvcName(*rsm) leaderHost := fmt.Sprintf("%s.%s", leader, svcName) targetHost := fmt.Sprintf("%s.%s", target, svcName) - svcPort := findSvcPort(*csSet) + svcPort := findSvcPort(*rsm) return []corev1.EnvVar{ { Name: leaderHostVarName, @@ -467,7 +478,7 @@ func doActionCleanup(dag *graph.DAG, action *batchv1.Job) { model.PrepareUpdate(dag, actionOld, actionNew) } -func emitEvent(transCtx *CSSetTransformContext, action *batchv1.Job) { +func emitEvent(transCtx *rsmTransformContext, action *batchv1.Job) { switch { case action.Status.Succeeded > 0: emitActionSucceedEvent(transCtx, action.Labels[jobTypeLabel], action.Name) @@ -476,21 +487,21 @@ func emitEvent(transCtx *CSSetTransformContext, action *batchv1.Job) { } } -func emitActionSucceedEvent(transCtx *CSSetTransformContext, actionType, actionName string) { +func emitActionSucceedEvent(transCtx *rsmTransformContext, actionType, actionName string) { message := fmt.Sprintf("%s succeed, job name: %s", actionType, actionName) emitActionEvent(transCtx, corev1.EventTypeNormal, actionType, message) } -func emitActionFailedEvent(transCtx *CSSetTransformContext, actionType, actionName string) { +func emitActionFailedEvent(transCtx *rsmTransformContext, actionType, actionName string) { message := fmt.Sprintf("%s failed, job name: %s", actionType, actionName) emitActionEvent(transCtx, corev1.EventTypeWarning, actionType, message) } -func emitAbnormalEvent(transCtx *CSSetTransformContext, actionType, actionName string, err error) { +func emitAbnormalEvent(transCtx *rsmTransformContext, actionType, actionName string, err error) { message := fmt.Sprintf("%s, job name: %s", err.Error(), actionName) emitActionEvent(transCtx, corev1.EventTypeWarning, actionType, message) } -func emitActionEvent(transCtx *CSSetTransformContext, eventType, reason, message string) { - transCtx.EventRecorder.Event(transCtx.CSSet, eventType, strings.ToUpper(reason), message) +func emitActionEvent(transCtx *rsmTransformContext, eventType, reason, message string) { + transCtx.EventRecorder.Event(transCtx.rsm, eventType, strings.ToUpper(reason), message) } diff --git a/internal/controller/rsm/utils_test.go b/internal/controller/rsm/utils_test.go new file mode 100644 index 00000000000..8d284636925 --- /dev/null +++ b/internal/controller/rsm/utils_test.go @@ -0,0 +1,324 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package rsm + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/golang/mock/gomock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "github.com/apecloud/kubeblocks/internal/constant" + "github.com/apecloud/kubeblocks/internal/controller/builder" +) + +var _ = Describe("utils test", func() { + var priorityMap map[string]int + + BeforeEach(func() { + rsm = builder.NewReplicatedStateMachineBuilder(namespace, name).SetRoles(roles).GetObject() + priorityMap = composeRolePriorityMap(*rsm) + }) + + Context("composeRolePriorityMap function", func() { + It("should work well", func() { + priorityList := []int{ + leaderPriority, + followerReadonlyPriority, + followerNonePriority, + learnerPriority, + } + Expect(priorityMap).ShouldNot(BeZero()) + Expect(priorityMap).Should(HaveLen(len(roles) + 1)) + for i, role := range roles { + Expect(priorityMap[role.Name]).Should(Equal(priorityList[i])) + } + }) + }) + + Context("sortPods function", func() { + It("should work well", func() { + pods := []corev1.Pod{ + *builder.NewPodBuilder(namespace, "pod-0").AddLabels(roleLabelKey, "follower").GetObject(), + *builder.NewPodBuilder(namespace, "pod-1").AddLabels(roleLabelKey, "logger").GetObject(), + *builder.NewPodBuilder(namespace, "pod-2").GetObject(), + *builder.NewPodBuilder(namespace, "pod-3").AddLabels(roleLabelKey, "learner").GetObject(), + *builder.NewPodBuilder(namespace, "pod-4").AddLabels(roleLabelKey, "candidate").GetObject(), + *builder.NewPodBuilder(namespace, "pod-5").AddLabels(roleLabelKey, "leader").GetObject(), + *builder.NewPodBuilder(namespace, "pod-6").AddLabels(roleLabelKey, "learner").GetObject(), + } + expectedOrder := []string{"pod-4", "pod-2", "pod-3", "pod-6", "pod-1", "pod-0", "pod-5"} + + sortPods(pods, priorityMap, false) + for i, pod := range pods { + Expect(pod.Name).Should(Equal(expectedOrder[i])) + } + }) + }) + + Context("sortMembersStatus function", func() { + It("should work well", func() { + // 1(learner)->2(learner)->4(logger)->0(follower)->3(leader) + membersStatus := []workloads.MemberStatus{ + { + PodName: "pod-0", + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + { + PodName: "pod-1", + ReplicaRole: workloads.ReplicaRole{Name: "learner"}, + }, + { + PodName: "pod-2", + ReplicaRole: workloads.ReplicaRole{Name: "learner"}, + }, + { + PodName: "pod-3", + ReplicaRole: workloads.ReplicaRole{Name: "leader"}, + }, + { + PodName: "pod-4", + ReplicaRole: workloads.ReplicaRole{Name: "logger"}, + }, + } + expectedOrder := []string{"pod-3", "pod-0", "pod-4", "pod-2", "pod-1"} + + sortMembersStatus(membersStatus, priorityMap) + for i, status := range membersStatus { + Expect(status.PodName).Should(Equal(expectedOrder[i])) + } + }) + }) + + Context("setMembersStatus function", func() { + It("should work well", func() { + pods := []corev1.Pod{ + *builder.NewPodBuilder(namespace, "pod-0").AddLabels(roleLabelKey, "follower").GetObject(), + *builder.NewPodBuilder(namespace, "pod-1").AddLabels(roleLabelKey, "leader").GetObject(), + *builder.NewPodBuilder(namespace, "pod-2").AddLabels(roleLabelKey, "follower").GetObject(), + } + readyCondition := corev1.PodCondition{ + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + } + pods[0].Status.Conditions = 
append(pods[0].Status.Conditions, readyCondition) + pods[1].Status.Conditions = append(pods[1].Status.Conditions, readyCondition) + oldMembersStatus := []workloads.MemberStatus{ + { + PodName: "pod-0", + ReplicaRole: workloads.ReplicaRole{Name: "leader"}, + }, + { + PodName: "pod-1", + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + { + PodName: "pod-2", + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + } + rsm.Spec.Replicas = 3 + rsm.Status.MembersStatus = oldMembersStatus + setMembersStatus(rsm, pods) + + Expect(rsm.Status.MembersStatus).Should(HaveLen(len(oldMembersStatus))) + Expect(rsm.Status.MembersStatus[0].PodName).Should(Equal("pod-1")) + Expect(rsm.Status.MembersStatus[0].Name).Should(Equal("leader")) + Expect(rsm.Status.MembersStatus[1].PodName).Should(Equal("pod-2")) + Expect(rsm.Status.MembersStatus[1].Name).Should(Equal("follower")) + Expect(rsm.Status.MembersStatus[2].PodName).Should(Equal("pod-0")) + Expect(rsm.Status.MembersStatus[2].Name).Should(Equal("follower")) + }) + }) + + Context("getRoleName function", func() { + It("should work well", func() { + pod := builder.NewPodBuilder(namespace, name).AddLabels(roleLabelKey, "LEADER").GetObject() + role := getRoleName(*pod) + Expect(role).Should(Equal("leader")) + }) + }) + + Context("getPodsOfStatefulSet function", func() { + It("should work well", func() { + sts := builder.NewStatefulSetBuilder(namespace, name). + AddLabels(constant.KBManagedByKey, kindReplicatedStateMachine). + AddLabels(constant.AppInstanceLabelKey, name). + GetObject() + pod := builder.NewPodBuilder(namespace, getPodName(name, 0)). + AddLabels(constant.KBManagedByKey, kindReplicatedStateMachine). + AddLabels(constant.AppInstanceLabelKey, name). + GetObject() + k8sMock.EXPECT(). + List(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, podList *corev1.PodList, _ ...client.ListOption) error { + Expect(podList).ShouldNot(BeNil()) + podList.Items = []corev1.Pod{*pod} + return nil + }).Times(1) + + pods, err := getPodsOfStatefulSet(ctx, k8sMock, sts) + Expect(err).Should(BeNil()) + Expect(pods).Should(HaveLen(1)) + Expect(pods[0].Namespace).Should(Equal(pod.Namespace)) + Expect(pods[0].Name).Should(Equal(pod.Name)) + }) + }) + + Context("getHeadlessSvcName function", func() { + It("should work well", func() { + Expect(getHeadlessSvcName(*rsm)).Should(Equal("bar-headless")) + }) + }) + + Context("findSvcPort function", func() { + It("should work well", func() { + By("set port name") + rsm.Spec.Service.Ports = []corev1.ServicePort{ + { + Name: "svc-port", + Protocol: corev1.ProtocolTCP, + Port: 12345, + TargetPort: intstr.FromString("my-service"), + }, + } + containerPort := int32(54321) + container := corev1.Container{ + Name: name, + Ports: []corev1.ContainerPort{ + { + Name: "my-service", + Protocol: corev1.ProtocolTCP, + ContainerPort: containerPort, + }, + }, + } + pod := builder.NewPodBuilder(namespace, getPodName(name, 0)). + SetContainers([]corev1.Container{container}). 
+ GetObject() + rsm.Spec.Template = corev1.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + Spec: pod.Spec, + } + Expect(findSvcPort(*rsm)).Should(BeEquivalentTo(containerPort)) + + By("set port number") + rsm.Spec.Service.Ports = []corev1.ServicePort{ + { + Name: "svc-port", + Protocol: corev1.ProtocolTCP, + Port: 12345, + TargetPort: intstr.FromInt(int(containerPort)), + }, + } + Expect(findSvcPort(*rsm)).Should(BeEquivalentTo(containerPort)) + + By("set no matched port") + rsm.Spec.Service.Ports = []corev1.ServicePort{ + { + Name: "svc-port", + Protocol: corev1.ProtocolTCP, + Port: 12345, + TargetPort: intstr.FromInt(int(containerPort - 1)), + }, + } + Expect(findSvcPort(*rsm)).Should(BeZero()) + }) + }) + + Context("getPodName function", func() { + It("should work well", func() { + Expect(getPodName(name, 1)).Should(Equal("bar-1")) + }) + }) + + Context("getActionName function", func() { + It("should work well", func() { + Expect(getActionName(name, 1, 2, jobTypeSwitchover)).Should(Equal("bar-1-2-switchover")) + }) + }) + + Context("getLeaderPodName function", func() { + It("should work well", func() { + By("set leader") + membersStatus := []workloads.MemberStatus{ + { + PodName: "pod-0", + ReplicaRole: workloads.ReplicaRole{Name: "leader", IsLeader: true}, + }, + { + PodName: "pod-1", + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + { + PodName: "pod-2", + ReplicaRole: workloads.ReplicaRole{Name: "follower"}, + }, + } + Expect(getLeaderPodName(membersStatus)).Should(Equal(membersStatus[0].PodName)) + + By("set no leader") + membersStatus[0].IsLeader = false + Expect(getLeaderPodName(membersStatus)).Should(BeZero()) + }) + }) + + Context("getPodOrdinal function", func() { + It("should work well", func() { + ordinal, err := getPodOrdinal("pod-5") + Expect(err).Should(BeNil()) + Expect(ordinal).Should(Equal(5)) + + _, err = getPodOrdinal("foo-bar") + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(ContainSubstring("wrong pod name")) + }) + }) + + Context("findActionImage function", func() { + It("should work well", func() { + Expect(findActionImage(&workloads.MembershipReconfiguration{}, jobTypePromote)).Should(Equal(defaultActionImage)) + }) + }) + + Context("getActionCommand function", func() { + It("should work well", func() { + reconfiguration := &workloads.MembershipReconfiguration{ + SwitchoverAction: &workloads.Action{Command: []string{"switchover"}}, + MemberJoinAction: &workloads.Action{Command: []string{"member-join"}}, + MemberLeaveAction: &workloads.Action{Command: []string{"member-leave"}}, + LogSyncAction: &workloads.Action{Command: []string{"log-sync"}}, + PromoteAction: &workloads.Action{Command: []string{"promote"}}, + } + + Expect(getActionCommand(reconfiguration, jobTypeSwitchover)).Should(Equal(reconfiguration.SwitchoverAction.Command)) + Expect(getActionCommand(reconfiguration, jobTypeMemberJoinNotifying)).Should(Equal(reconfiguration.MemberJoinAction.Command)) + Expect(getActionCommand(reconfiguration, jobTypeMemberLeaveNotifying)).Should(Equal(reconfiguration.MemberLeaveAction.Command)) + Expect(getActionCommand(reconfiguration, jobTypeLogSync)).Should(Equal(reconfiguration.LogSyncAction.Command)) + Expect(getActionCommand(reconfiguration, jobTypePromote)).Should(Equal(reconfiguration.PromoteAction.Command)) + }) + }) +}) diff --git a/internal/testutil/k8s/mocks/generate.go b/internal/testutil/k8s/mocks/generate.go index 9ed9b29be7d..c827302a197 100644 --- a/internal/testutil/k8s/mocks/generate.go +++ 
b/internal/testutil/k8s/mocks/generate.go @@ -19,4 +19,4 @@ along with this program. If not, see . package mocks -//go:generate go run github.com/golang/mock/mockgen -copyright_file ../../../../hack/boilerplate.go.txt -package mocks -destination k8sclient_mocks.go sigs.k8s.io/controller-runtime/pkg/client Client +//go:generate go run github.com/golang/mock/mockgen -copyright_file ../../../../hack/boilerplate.go.txt -package mocks -destination k8sclient_mocks.go sigs.k8s.io/controller-runtime/pkg/client Client,StatusWriter
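
Reviewer note: the ordering asserted by sortPods/sortMembersStatus in utils_test.go (and relied on by the serial plan in update_plan_test.go) is "ascending role priority, leader last". Below is a minimal, self-contained Go sketch of that ordering; the priority values, role set, and pod names are invented for illustration and are not the constants defined in the rsm package.

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// Illustrative priorities only; the real constants (leaderPriority,
// learnerPriority, emptyPriority, ...) live in the rsm package.
var rolePriority = map[string]int{
	"":         0, // pod has no role label yet
	"learner":  10,
	"follower": 20,
	"leader":   30,
}

// ordinal extracts the trailing "-<n>" index of a StatefulSet pod name.
func ordinal(podName string) int {
	i := strings.LastIndex(podName, "-")
	n, _ := strconv.Atoi(podName[i+1:])
	return n
}

func main() {
	pods := []struct{ name, role string }{
		{"demo-0", "follower"},
		{"demo-1", "leader"},
		{"demo-2", ""},
		{"demo-3", "learner"},
	}
	// Ascending role priority with the ordinal as tie-breaker: low-priority
	// pods are updated first and the leader lands in the final batch.
	sort.SliceStable(pods, func(i, j int) bool {
		pi, pj := rolePriority[pods[i].role], rolePriority[pods[j].role]
		if pi != pj {
			return pi < pj
		}
		return ordinal(pods[i].name) < ordinal(pods[j].name)
	})
	for _, p := range pods {
		fmt.Printf("%s (%q)\n", p.name, p.role)
	}
	// Output:
	// demo-2 ("")
	// demo-3 ("learner")
	// demo-0 ("follower")
	// demo-1 ("leader")
}
```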