diff --git a/.github/workflows/cicd-pull-request.yml b/.github/workflows/cicd-pull-request.yml index 98e3ccf3d72..3fbc1064af2 100644 --- a/.github/workflows/cicd-pull-request.yml +++ b/.github/workflows/cicd-pull-request.yml @@ -218,16 +218,6 @@ jobs: DOCKERFILE_PATH: "./docker/Dockerfile-tools" secrets: inherit - check-datascript-image: - needs: trigger-mode - if: contains(needs.trigger-mode.outputs.trigger-mode, '[docker]') - uses: apecloud/apecloud-cd/.github/workflows/release-image-check.yml@v0.1.24 - with: - IMG: "apecloud/kubeblocks-datascript" - BUILDX_PLATFORMS: "linux/amd64" - DOCKERFILE_PATH: "./docker/Dockerfile-datascript" - secrets: inherit - check-dataprotection-image: needs: trigger-mode if: contains(needs.trigger-mode.outputs.trigger-mode, '[docker]') @@ -256,7 +246,7 @@ jobs: pr-check: name: make test needs: [ trigger-mode, test-parallel, pr-pre-check, pr-make-test, - check-image, check-tools-image, check-datascript-image, check-dataprotection-image, check-helm ] + check-image, check-tools-image, check-dataprotection-image, check-helm ] if: ${{ github.event.action == 'labeled' && (github.event.label.name == 'approved' || github.event.label.name == 'pre-approve') && always() }} runs-on: ubuntu-latest steps: @@ -296,11 +286,6 @@ jobs: exit 1 fi - if [[ "${{ needs.check-datascript-image.result }}" == "failure" || "${{ needs.check-datascript-image.result }}" == "cancelled" ]]; then - echo "release datascript image fail" - exit 1 - fi - if [[ "${{ needs.check-dataprotection-image.result }}" == "failure" || "${{ needs.check-dataprotection-image.result }}" == "cancelled" ]]; then echo "release dataprotection image fail" exit 1 diff --git a/.github/workflows/cicd-push.yml b/.github/workflows/cicd-push.yml index 56d1f359d24..e15498237a6 100644 --- a/.github/workflows/cicd-push.yml +++ b/.github/workflows/cicd-push.yml @@ -211,16 +211,6 @@ jobs: DOCKERFILE_PATH: "./docker/Dockerfile-tools" secrets: inherit - check-datascript-image: - needs: trigger-mode - if: ${{ contains(needs.trigger-mode.outputs.trigger-mode, '[docker]') }} - uses: apecloud/apecloud-cd/.github/workflows/release-image-check.yml@v0.1.24 - with: - IMG: "apecloud/kubeblocks-datascript" - BUILDX_PLATFORMS: "linux/amd64" - DOCKERFILE_PATH: "./docker/Dockerfile-datascript" - secrets: inherit - check-dataprotection-image: needs: trigger-mode if: ${{ contains(needs.trigger-mode.outputs.trigger-mode, '[docker]') }} @@ -313,7 +303,7 @@ jobs: push-check: name: make-test needs: [ trigger-mode, push-pre-check, push-make-test, check-image, check-tools-image, - check-datascript-image, check-dataprotection-image, check-helm, apis-doc, check-license-header ] + check-dataprotection-image, check-helm, apis-doc, check-license-header ] if: ${{ always() }} runs-on: ubuntu-latest steps: @@ -343,11 +333,6 @@ jobs: exit 1 fi - if [[ "${{ needs.check-datascript-image.result }}" == "failure" || "${{ needs.check-datascript-image.result }}" == "cancelled" ]]; then - echo "check datascript image fail" - exit 1 - fi - if [[ "${{ needs.check-dataprotection-image.result }}" == "failure" || "${{ needs.check-dataprotection-image.result }}" == "cancelled" ]]; then echo "check dataprotection image fail" exit 1 diff --git a/.github/workflows/release-image.yml b/.github/workflows/release-image.yml index f1b72991f33..5076763d0c9 100644 --- a/.github/workflows/release-image.yml +++ b/.github/workflows/release-image.yml @@ -19,7 +19,6 @@ on: - Dockerfile - Dockerfile-charts - Dockerfile-dataprotection - - Dockerfile-datascript - Dockerfile-dev - 
Dockerfile-tools release: @@ -93,17 +92,6 @@ jobs: DOCKERFILE_PATH: "./docker/Dockerfile-tools" secrets: inherit - release-datascript-image: - if: ${{ inputs.dockerfile == '' || inputs.dockerfile == 'Dockerfile-datascript' }} - needs: release-version - uses: apecloud/apecloud-cd/.github/workflows/release-image-cache.yml@v0.1.24 - with: - IMG: "apecloud/kubeblocks-datascript" - VERSION: "${{ needs.release-version.outputs.release-version }}" - APECD_REF: "v0.1.24" - DOCKERFILE_PATH: "./docker/Dockerfile-datascript" - secrets: inherit - release-dataprotection-image: if: ${{ inputs.dockerfile == '' || inputs.dockerfile == 'Dockerfile-dataprotection' }} needs: release-version @@ -142,7 +130,7 @@ jobs: release-message: runs-on: ubuntu-latest - needs: [ release-image, release-tools-image, release-datascript-image, release-dataprotection-image ] + needs: [ release-image, release-tools-image, release-dataprotection-image ] outputs: content-result: ${{ steps.release_message.outputs.content_result }} release-version: ${{ steps.release_message.outputs.release_version }} @@ -157,7 +145,7 @@ jobs: echo 'artifact_key='${ARTIFACT_KEY} >> $GITHUB_OUTPUT CONTENT="error" - if [[ "${{ needs.release-image.result }}" == "success" && "${{ needs.release-tools-image.result }}" == "success" && "${{ needs.release-datascript-image.result }}" == "success" && "${{ needs.release-dataprotection-image.result }}" == "success" ]]; then + if [[ "${{ needs.release-image.result }}" == "success" && "${{ needs.release-tools-image.result }}" == "success" && "${{ needs.release-dataprotection-image.result }}" == "success" ]]; then CONTENT="success" echo "success" > ${ARTIFACT_KEY} else diff --git a/apis/apps/v1alpha1/opsrequest_conditions.go b/apis/apps/v1alpha1/opsrequest_conditions.go index 8de6aafec0a..ed7533bc8a7 100644 --- a/apis/apps/v1alpha1/opsrequest_conditions.go +++ b/apis/apps/v1alpha1/opsrequest_conditions.go @@ -41,7 +41,6 @@ const ( ConditionTypeStart = "Starting" ConditionTypeVersionUpgrading = "VersionUpgrading" ConditionTypeExpose = "Exposing" - ConditionTypeDataScript = "ExecuteDataScript" ConditionTypeBackup = "Backup" ConditionTypeInstanceRebuilding = "InstancesRebuilding" ConditionTypeCustomOperation = "CustomOperation" @@ -304,20 +303,6 @@ func NewReconfigureCondition(ops *OpsRequest) *metav1.Condition { } } -func NewDataScriptCondition(ops *OpsRequest) *metav1.Condition { - return newOpsCondition(ops, ConditionTypeDataScript, "DataScriptStarted", fmt.Sprintf("Start to execute data script in Cluster: %s", ops.Spec.GetClusterName())) -} - -func newOpsCondition(_ *OpsRequest, condType, reason, message string) *metav1.Condition { - return &metav1.Condition{ - Type: condType, - Status: metav1.ConditionTrue, - Reason: reason, - LastTransitionTime: metav1.Now(), - Message: message, - } -} - // NewReconfigureRunningCondition creates a condition that the OpsRequest reconfigure workflow func NewReconfigureRunningCondition(ops *OpsRequest, conditionType string, configSpecName string, info ...string) *metav1.Condition { status := metav1.ConditionTrue diff --git a/apis/apps/v1alpha1/opsrequest_types.go b/apis/apps/v1alpha1/opsrequest_types.go index bb9b8ab701d..1c8dd0eca13 100644 --- a/apis/apps/v1alpha1/opsrequest_types.go +++ b/apis/apps/v1alpha1/opsrequest_types.go @@ -71,7 +71,7 @@ type OpsRequestSpec struct { // Specifies the type of this operation. 
Supported types include "Start", "Stop", "Restart", "Switchover", // "VerticalScaling", "HorizontalScaling", "VolumeExpansion", "Reconfiguring", "Upgrade", "Backup", "Restore", - // "Expose", "DataScript", "RebuildInstance", "Custom". + // "Expose", "RebuildInstance", "Custom". // // Note: This field is immutable once set. // @@ -194,16 +194,6 @@ type SpecificOpsRequest struct { // +optional ExposeList []Expose `json:"expose,omitempty"` - // Specifies the image and scripts for executing engine-specific operations such as creating databases or users. - // It supports limited engines including MySQL, PostgreSQL, Redis, MongoDB. - // - // ScriptSpec has been replaced by the more versatile OpsDefinition. - // It is recommended to use OpsDefinition instead. - // ScriptSpec is deprecated and will be removed in a future version. - // - // +optional - ScriptSpec *ScriptSpec `json:"scriptSpec,omitempty"` - // Specifies the parameters to backup a Cluster. // +optional Backup *Backup `json:"backup,omitempty"` @@ -870,68 +860,6 @@ type PointInTimeRefSpec struct { Ref RefNamespaceName `json:"ref,omitempty"` } -// ScriptSpec is a legacy feature for executing engine-specific operations such as creating databases or users. -// It supports limited engines including MySQL, PostgreSQL, Redis, MongoDB. -// -// ScriptSpec has been replaced by the more versatile OpsDefinition. -// It is recommended to use OpsDefinition instead. ScriptSpec is deprecated and will be removed in a future version. -type ScriptSpec struct { - // Specifies the name of the Component. - ComponentOps `json:",inline"` - - // Specifies the image to be used to execute scripts. - // - // By default, the image "apecloud/kubeblocks-datascript:latest" is used. - // - // +optional - Image string `json:"image,omitempty"` - - // Defines the secret to be used to execute the script. If not specified, the default cluster root credential secret is used. - // +optional - Secret *ScriptSecret `json:"secret,omitempty"` - - // Defines the content of scripts to be executed. - // - // All scripts specified in this field will be executed in the order they are provided. - // - // Note: this field cannot be modified once set. - // - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.scriptSpec.script" - Script []string `json:"script,omitempty"` - - // Specifies the sources of the scripts to be executed. - // Each script can be imported either from a ConfigMap or a Secret. - // - // All scripts obtained from the sources specified in this field will be executed after - // any scripts provided in the `script` field. - // - // Execution order: - // 1. Scripts provided in the `script` field, in the order of the scripts listed. - // 2. Scripts imported from ConfigMaps, in the order of the sources listed. - // 3. Scripts imported from Secrets, in the order of the sources listed. - // - // Note: this field cannot be modified once set. - // - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.scriptSpec.scriptFrom" - ScriptFrom *ScriptFrom `json:"scriptFrom,omitempty"` - - // Specifies the labels used to select the Pods on which the script should be executed. - // - // By default, the script is executed on the Pod associated with the service named "{clusterName}-{componentName}", - // which typically routes to the Pod with the primary/leader role. 
- // - // However, some Components, such as Redis, do not synchronize account information between primary and secondary Pods. - // In these cases, the script must be executed on all replica Pods matching the selector. - // - // Note: this field cannot be modified once set. - // - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.scriptSpec.script.selector" - Selector *metav1.LabelSelector `json:"selector,omitempty"` -} - type Backup struct { // Specifies the name of the Backup custom resource. // @@ -1022,42 +950,6 @@ type Restore struct { DeferPostReadyUntilClusterRunning bool `json:"deferPostReadyUntilClusterRunning,omitempty"` } -// ScriptSecret represents the secret that is used to execute the script. -type ScriptSecret struct { - // Specifies the name of the secret. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - Name string `json:"name"` - // Used to specify the username part of the secret. - // +kubebuilder:default:="username" - // +optional - UsernameKey string `json:"usernameKey,omitempty"` - // Used to specify the password part of the secret. - // +kubebuilder:default:="password" - // +optional - PasswordKey string `json:"passwordKey,omitempty"` -} - -// ScriptFrom specifies the source of the script to be executed, which can be either a ConfigMap or a Secret. -type ScriptFrom struct { - // A list of ConfigMapKeySelector objects, each specifies a ConfigMap and a key containing the script. - // - // Note: This field cannot be modified once set. - // - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.scriptSpec.scriptFrom.configMapRef" - ConfigMapRef []corev1.ConfigMapKeySelector `json:"configMapRef,omitempty"` - - // A list of SecretKeySelector objects, each specifies a Secret and a key containing the script. - // - // Note: This field cannot be modified once set. - // - // +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.scriptSpec.scriptFrom.secretRef" - SecretRef []corev1.SecretKeySelector `json:"secretRef,omitempty"` -} - // OpsRequestStatus represents the observed state of an OpsRequest. type OpsRequestStatus struct { // Records the cluster generation after the OpsRequest action has been handled. @@ -1108,7 +1000,7 @@ type OpsRequestStatus struct { // Describes the detailed status of the OpsRequest. // Possible condition types include "Cancelled", "WaitForProgressing", "Validated", "Succeed", "Failed", "Restarting", // "VerticalScaling", "HorizontalScaling", "VolumeExpanding", "Reconfigure", "Switchover", "Stopping", "Starting", - // "VersionUpgrading", "Exposing", "ExecuteDataScript", "Backup", "InstancesRebuilding", "CustomOperation". + // "VersionUpgrading", "Exposing", "Backup", "InstancesRebuilding", "CustomOperation". 
// +optional // +patchMergeKey=type // +patchStrategy=merge diff --git a/apis/apps/v1alpha1/opsrequest_validation.go b/apis/apps/v1alpha1/opsrequest_validation.go index ec5aceffd4d..4f650084f11 100644 --- a/apis/apps/v1alpha1/opsrequest_validation.go +++ b/apis/apps/v1alpha1/opsrequest_validation.go @@ -135,8 +135,6 @@ func (r *OpsRequest) validateOps(ctx context.Context, return r.validateReconfigure(ctx, k8sClient, cluster) case SwitchoverType: return r.validateSwitchover(ctx, k8sClient, cluster) - case DataScriptType: - return r.validateDataScript(ctx, k8sClient, cluster) case ExposeType: return r.validateExpose(ctx, cluster) case RebuildInstanceType: @@ -730,48 +728,6 @@ func (r *OpsRequest) getSCNameByPvcAndCheckStorageSize(ctx context.Context, return pvc.Spec.StorageClassName, nil } -// validateDataScript validates the data script. -func (r *OpsRequest) validateDataScript(ctx context.Context, cli client.Client, cluster *Cluster) error { - validateScript := func(spec *ScriptSpec) error { - rawScripts := spec.Script - scriptsFrom := spec.ScriptFrom - if len(rawScripts) == 0 && (scriptsFrom == nil) { - return fmt.Errorf("spec.scriptSpec.script and spec.scriptSpec.scriptFrom can not be empty at the same time") - } - if scriptsFrom != nil { - if scriptsFrom.ConfigMapRef == nil && scriptsFrom.SecretRef == nil { - return fmt.Errorf("spec.scriptSpec.scriptFrom.configMapRefs and spec.scriptSpec.scriptFrom.secretRefs can not be empty at the same time") - } - for _, configMapRef := range scriptsFrom.ConfigMapRef { - if err := cli.Get(ctx, types.NamespacedName{Name: configMapRef.Name, Namespace: r.Namespace}, &corev1.ConfigMap{}); err != nil { - return err - } - } - for _, secret := range scriptsFrom.SecretRef { - if err := cli.Get(ctx, types.NamespacedName{Name: secret.Name, Namespace: r.Namespace}, &corev1.Secret{}); err != nil { - return err - } - } - } - return nil - } - - scriptSpec := r.Spec.ScriptSpec - if scriptSpec == nil { - return notEmptyError("spec.scriptSpec") - } - - if err := r.checkComponentExistence(cluster, []ComponentOps{scriptSpec.ComponentOps}); err != nil { - return err - } - - if err := validateScript(scriptSpec); err != nil { - return err - } - - return nil -} - // validateVerticalResourceList checks if k8s resourceList is legal func validateVerticalResourceList(resourceList map[corev1.ResourceName]resource.Quantity) (string, error) { for k := range resourceList { diff --git a/apis/apps/v1alpha1/type.go b/apis/apps/v1alpha1/type.go index 140d4358f98..b9001c668c6 100644 --- a/apis/apps/v1alpha1/type.go +++ b/apis/apps/v1alpha1/type.go @@ -367,7 +367,7 @@ const ( // OpsType defines operation types. // +enum -// +kubebuilder:validation:Enum={Upgrade,VerticalScaling,VolumeExpansion,HorizontalScaling,Restart,Reconfiguring,Start,Stop,Expose,Switchover,DataScript,Backup,Restore,RebuildInstance,Custom} +// +kubebuilder:validation:Enum={Upgrade,VerticalScaling,VolumeExpansion,HorizontalScaling,Restart,Reconfiguring,Start,Stop,Expose,Switchover,Backup,Restore,RebuildInstance,Custom} type OpsType string const ( @@ -381,7 +381,6 @@ const ( StopType OpsType = "Stop" // StopType the stop operation will delete all pods in a cluster concurrently. StartType OpsType = "Start" // StartType the start operation will start the pods which is deleted in stop operation. ExposeType OpsType = "Expose" - DataScriptType OpsType = "DataScript" // DataScriptType the data script operation will execute the data script against the cluster. 
BackupType OpsType = "Backup" RestoreType OpsType = "Restore" RebuildInstanceType OpsType = "RebuildInstance" // RebuildInstance rebuilding an instance is very useful when a node is offline or an instance is unrecoverable. diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index fb60452a065..150aceeb326 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -4426,86 +4426,6 @@ func (in *SchedulingPolicy) DeepCopy() *SchedulingPolicy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScriptFrom) DeepCopyInto(out *ScriptFrom) { - *out = *in - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - *out = make([]v1.ConfigMapKeySelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = make([]v1.SecretKeySelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptFrom. -func (in *ScriptFrom) DeepCopy() *ScriptFrom { - if in == nil { - return nil - } - out := new(ScriptFrom) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScriptSecret) DeepCopyInto(out *ScriptSecret) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptSecret. -func (in *ScriptSecret) DeepCopy() *ScriptSecret { - if in == nil { - return nil - } - out := new(ScriptSecret) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScriptSpec) DeepCopyInto(out *ScriptSpec) { - *out = *in - out.ComponentOps = in.ComponentOps - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(ScriptSecret) - **out = **in - } - if in.Script != nil { - in, out := &in.Script, &out.Script - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ScriptFrom != nil { - in, out := &in.ScriptFrom, &out.ScriptFrom - *out = new(ScriptFrom) - (*in).DeepCopyInto(*out) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScriptSpec. -func (in *ScriptSpec) DeepCopy() *ScriptSpec { - if in == nil { - return nil - } - out := new(ScriptSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SecretRef) DeepCopyInto(out *SecretRef) { *out = *in @@ -4939,11 +4859,6 @@ func (in *SpecificOpsRequest) DeepCopyInto(out *SpecificOpsRequest) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.ScriptSpec != nil { - in, out := &in.ScriptSpec, &out.ScriptSpec - *out = new(ScriptSpec) - (*in).DeepCopyInto(*out) - } if in.Backup != nil { in, out := &in.Backup, &out.Backup *out = new(Backup) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 5a43ff6169d..2f57010096c 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -135,7 +135,6 @@ func init() { viper.SetDefault(constant.CfgHostPortConfigMapName, "kubeblocks-host-ports") viper.SetDefault(constant.CfgHostPortIncludeRanges, "1025-65536") viper.SetDefault(constant.CfgHostPortExcludeRanges, "6443,10250,10257,10259,2379-2380,30000-32767") - viper.SetDefault(constant.KBDataScriptClientsImage, "apecloud/kubeblocks-datascript:latest") viper.SetDefault(constant.KubernetesClusterDomainEnv, constant.DefaultDNSDomain) viper.SetDefault(instanceset.MaxPlainRevisionCount, 1024) viper.SetDefault(instanceset.FeatureGateIgnorePodVerticalScaling, false) diff --git a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml index 5551e540d88..ac1191980e8 100644 --- a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml +++ b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml @@ -4476,207 +4476,6 @@ spec: required: - backupName type: object - scriptSpec: - description: |- - Specifies the image and scripts for executing engine-specific operations such as creating databases or users. - It supports limited engines including MySQL, PostgreSQL, Redis, MongoDB. - - - ScriptSpec has been replaced by the more versatile OpsDefinition. - It is recommended to use OpsDefinition instead. - ScriptSpec is deprecated and will be removed in a future version. - properties: - componentName: - description: Specifies the name of the Component. - type: string - image: - description: |- - Specifies the image to be used to execute scripts. - - - By default, the image "apecloud/kubeblocks-datascript:latest" is used. - type: string - script: - description: |- - Defines the content of scripts to be executed. - - - All scripts specified in this field will be executed in the order they are provided. - - - Note: this field cannot be modified once set. - items: - type: string - type: array - x-kubernetes-validations: - - message: forbidden to update spec.scriptSpec.script - rule: self == oldSelf - scriptFrom: - description: |- - Specifies the sources of the scripts to be executed. - Each script can be imported either from a ConfigMap or a Secret. - - - All scripts obtained from the sources specified in this field will be executed after - any scripts provided in the `script` field. - - - Execution order: - 1. Scripts provided in the `script` field, in the order of the scripts listed. - 2. Scripts imported from ConfigMaps, in the order of the sources listed. - 3. Scripts imported from Secrets, in the order of the sources listed. - - - Note: this field cannot be modified once set. - properties: - configMapRef: - description: |- - A list of ConfigMapKeySelector objects, each specifies a ConfigMap and a key containing the script. - - - Note: This field cannot be modified once set. - items: - description: Selects a key from a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: |- - Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-validations: - - message: forbidden to update spec.scriptSpec.scriptFrom.configMapRef - rule: self == oldSelf - secretRef: - description: |- - A list of SecretKeySelector objects, each specifies a Secret and a key containing the script. - - - Note: This field cannot be modified once set. - items: - description: SecretKeySelector selects a key of a Secret. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-validations: - - message: forbidden to update spec.scriptSpec.scriptFrom.secretRef - rule: self == oldSelf - type: object - x-kubernetes-validations: - - message: forbidden to update spec.scriptSpec.scriptFrom - rule: self == oldSelf - secret: - description: Defines the secret to be used to execute the script. - If not specified, the default cluster root credential secret - is used. - properties: - name: - description: Specifies the name of the secret. - maxLength: 63 - pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ - type: string - passwordKey: - default: password - description: Used to specify the password part of the secret. - type: string - usernameKey: - default: username - description: Used to specify the username part of the secret. - type: string - required: - - name - type: object - selector: - description: |- - Specifies the labels used to select the Pods on which the script should be executed. - - - By default, the script is executed on the Pod associated with the service named "{clusterName}-{componentName}", - which typically routes to the Pod with the primary/leader role. - - - However, some Components, such as Redis, do not synchronize account information between primary and secondary Pods. - In these cases, the script must be executed on all replica Pods matching the selector. - - - Note: this field cannot be modified once set. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: forbidden to update spec.scriptSpec.script.selector - rule: self == oldSelf - required: - - componentName - type: object switchover: description: Lists Switchover objects, each specifying a Component to perform the switchover operation. @@ -4739,7 +4538,7 @@ spec: description: |- Specifies the type of this operation. Supported types include "Start", "Stop", "Restart", "Switchover", "VerticalScaling", "HorizontalScaling", "VolumeExpansion", "Reconfiguring", "Upgrade", "Backup", "Restore", - "Expose", "DataScript", "RebuildInstance", "Custom". + "Expose", "RebuildInstance", "Custom". Note: This field is immutable once set. @@ -4754,7 +4553,6 @@ spec: - Stop - Expose - Switchover - - DataScript - Backup - Restore - RebuildInstance @@ -5207,7 +5005,7 @@ spec: Describes the detailed status of the OpsRequest. Possible condition types include "Cancelled", "WaitForProgressing", "Validated", "Succeed", "Failed", "Restarting", "VerticalScaling", "HorizontalScaling", "VolumeExpanding", "Reconfigure", "Switchover", "Stopping", "Starting", - "VersionUpgrading", "Exposing", "ExecuteDataScript", "Backup", "InstancesRebuilding", "CustomOperation". + "VersionUpgrading", "Exposing", "Backup", "InstancesRebuilding", "CustomOperation". items: description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for diff --git a/controllers/apps/operations/datascript.go b/controllers/apps/operations/datascript.go deleted file mode 100644 index 051bbf480cf..00000000000 --- a/controllers/apps/operations/datascript.go +++ /dev/null @@ -1,394 +0,0 @@ -/* -Copyright (C) 2022-2024 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package operations - -import ( - "fmt" - "time" - - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - "github.com/apecloud/kubeblocks/pkg/constant" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" -) - -var _ OpsHandler = DataScriptOpsHandler{} - -// DataScriptOpsHandler handles DataScript operation, it is more like a one-time command operation. 
-type DataScriptOpsHandler struct { -} - -func init() { - // ToClusterPhase is not defined, because 'datascript' does not affect the cluster status. - dataScriptOpsHandler := DataScriptOpsHandler{} - dataScriptBehavior := OpsBehaviour{ - FromClusterPhases: []appsv1alpha1.ClusterPhase{appsv1alpha1.RunningClusterPhase}, - OpsHandler: dataScriptOpsHandler, - } - opsMgr := GetOpsManager() - opsMgr.RegisterOps(appsv1alpha1.DataScriptType, dataScriptBehavior) -} - -// Action implements OpsHandler.Action -// It will create a job to execute the script. It will fail fast if the script is not valid, or the target pod is not found. -func (o DataScriptOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Client, opsResource *OpsResource) error { - opsRequest := opsResource.OpsRequest - cluster := opsResource.Cluster - spec := opsRequest.Spec.ScriptSpec - - // get component - component := cluster.Spec.GetComponentByName(spec.ComponentName) - if component == nil { - // we have checked component exists in validation, so this should not happen - return intctrlutil.NewFatalError(fmt.Sprintf("component %s not found in cluster %s", spec.ComponentName, cluster.Name)) - } - - clusterDef, err := getClusterDefByName(reqCtx.Ctx, cli, cluster.Spec.ClusterDefRef) - if err != nil { - if apierrors.IsNotFound(err) { - // fail fast if cluster def does not exists - return intctrlutil.NewFatalError(err.Error()) - } - return err - } - // TODO(v1.0): how to? - //// get componentDef - // componentDef := clusterDef.GetComponentDefByName(component.ComponentDefRef) - // if componentDef == nil { - // return intctrlutil.NewFatalError(fmt.Sprintf("componentDef %s not found in clusterDef %s", component.ComponentDefRef, clusterDef.Name)) - // } - return intctrlutil.NewFatalError(fmt.Sprintf("componentDef %s not found in clusterDef %s", component.ComponentDefRef, clusterDef.Name)) - - //// create jobs - // var jobs []*batchv1.Job - //// TODO(v1.0): character-type - // if jobs, err = buildDataScriptJobs(reqCtx, cli, opsResource.Cluster, component, opsRequest, ""); err != nil { - // return err - // } - // for _, job := range jobs { - // if err = cli.Create(reqCtx.Ctx, job); err != nil { - // return err - // } - // } - // return nil -} - -// ReconcileAction implements OpsHandler.ReconcileAction -// It will check the job status, and update the opsRequest status. -// If the job is neither completed nor failed, it will retry after 1 second. -// If the job is completed, it will return OpsSucceedPhase -// If the job is failed, it will return OpsFailedPhase. 
-func (o DataScriptOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCtx, cli client.Client, opsResource *OpsResource) (appsv1alpha1.OpsPhase, time.Duration, error) { - opsRequest := opsResource.OpsRequest - cluster := opsResource.Cluster - spec := opsRequest.Spec.ScriptSpec - - meetsJobConditions := func(job *batchv1.Job, condType batchv1.JobConditionType, condStatus corev1.ConditionStatus) bool { - for _, condition := range job.Status.Conditions { - if condition.Type == condType && condition.Status == condStatus { - return true - } - } - return false - } - - // retrieve job for this opsRequest - jobList := &batchv1.JobList{} - if err := cli.List(reqCtx.Ctx, jobList, client.InNamespace(cluster.Namespace), client.MatchingLabels(getDataScriptJobLabels(cluster.Name, spec.ComponentName, opsRequest.Name))); err != nil { - return appsv1alpha1.OpsFailedPhase, 0, err - } else if len(jobList.Items) == 0 { - return appsv1alpha1.OpsFailedPhase, 0, fmt.Errorf("job not found") - } - - var ( - expectedCount int - succeedCount int - failedCount int - ) - - expectedCount = len(jobList.Items) - // check job status - for _, job := range jobList.Items { - if meetsJobConditions(&job, batchv1.JobComplete, corev1.ConditionTrue) { - succeedCount++ - } else if meetsJobConditions(&job, batchv1.JobFailed, corev1.ConditionTrue) { - failedCount++ - } - } - - opsStatus := appsv1alpha1.OpsRunningPhase - if succeedCount == expectedCount { - opsStatus = appsv1alpha1.OpsSucceedPhase - } else if failedCount+succeedCount == expectedCount { - opsStatus = appsv1alpha1.OpsFailedPhase - } - - patch := client.MergeFrom(opsRequest.DeepCopy()) - opsRequest.Status.Progress = fmt.Sprintf("%d/%d", succeedCount, expectedCount) - - // patch OpsRequest.status.components - if err := cli.Status().Patch(reqCtx.Ctx, opsRequest, patch); err != nil { - return opsStatus, time.Second, err - } - - if succeedCount == expectedCount { - return appsv1alpha1.OpsSucceedPhase, 0, nil - } else if failedCount+succeedCount == expectedCount { - return appsv1alpha1.OpsFailedPhase, 0, fmt.Errorf("%d job execution failed, please check the job log ", failedCount) - } - return appsv1alpha1.OpsRunningPhase, 5 * time.Second, nil -} - -func (o DataScriptOpsHandler) ActionStartedCondition(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRes *OpsResource) (*metav1.Condition, error) { - return appsv1alpha1.NewDataScriptCondition(opsRes.OpsRequest), nil -} - -func (o DataScriptOpsHandler) SaveLastConfiguration(reqCtx intctrlutil.RequestCtx, cli client.Client, opsResource *OpsResource) error { - return nil -} - -// getScriptContent will get script content from script or scriptFrom -func getScriptContent(reqCtx intctrlutil.RequestCtx, cli client.Client, spec *appsv1alpha1.ScriptSpec) ([]string, error) { - script := make([]string, 0) - if len(spec.Script) > 0 { - script = append(script, spec.Script...) 
- } - if spec.ScriptFrom == nil { - return script, nil - } - configMapsRefs := spec.ScriptFrom.ConfigMapRef - secretRefs := spec.ScriptFrom.SecretRef - - if len(configMapsRefs) > 0 { - obj := &corev1.ConfigMap{} - for _, cm := range configMapsRefs { - if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: cm.Name}, obj); err != nil { - return nil, err - } - script = append(script, obj.Data[cm.Key]) - } - } - - if len(secretRefs) > 0 { - obj := &corev1.Secret{} - for _, secret := range secretRefs { - if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: secret.Name}, obj); err != nil { - return nil, err - } - if obj.Data[secret.Key] == nil { - return nil, fmt.Errorf("secret %s/%s does not have key %s", reqCtx.Req.Namespace, secret.Name, secret.Key) - } - secretData := string(obj.Data[secret.Key]) - // trim the last \n - if len(secretData) > 0 && secretData[len(secretData)-1] == '\n' { - secretData = secretData[:len(secretData)-1] - } - script = append(script, secretData) - } - } - return script, nil -} - -// func getTargetService(reqCtx intctrlutil.RequestCtx, cli client.Client, clusterObjectKey client.ObjectKey, componentName string) (string, error) { -// // get svc -// service := &corev1.Service{} -// serviceName := fmt.Sprintf("%s-%s", clusterObjectKey.Name, componentName) -// if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: clusterObjectKey.Namespace, Name: serviceName}, service); err != nil { -// return "", err -// } -// return serviceName, nil -// } - -// func buildDataScriptJobs(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster *appsv1alpha1.Cluster, component *appsv1alpha1.ClusterComponentSpec, -// ops *appsv1alpha1.OpsRequest, charType string) ([]*batchv1.Job, error) { -// engineForJob, err := register.NewClusterCommands(charType) -// if err != nil || engineForJob == nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// -// buildJob := func(endpoint string) (*batchv1.Job, error) { -// envs := []corev1.EnvVar{} -// -// envs = append(envs, corev1.EnvVar{ -// Name: "KB_HOST", -// Value: endpoint, -// }) -// -// // parse username and password -// secretFrom := ops.Spec.ScriptSpec.Secret -// if secretFrom == nil { -// return nil, intctrlutil.NewFatalError("missing secret for user & password") -// } -// -// // verify secrets exist -// if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: secretFrom.Name}, &corev1.Secret{}); err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// -// envs = append(envs, corev1.EnvVar{ -// Name: "KB_USER", -// ValueFrom: &corev1.EnvVarSource{ -// SecretKeyRef: &corev1.SecretKeySelector{ -// Key: secretFrom.UsernameKey, -// LocalObjectReference: corev1.LocalObjectReference{ -// Name: secretFrom.Name, -// }, -// }, -// }, -// }) -// envs = append(envs, corev1.EnvVar{ -// Name: "KB_PASSWD", -// ValueFrom: &corev1.EnvVarSource{ -// SecretKeyRef: &corev1.SecretKeySelector{ -// Key: secretFrom.PasswordKey, -// LocalObjectReference: corev1.LocalObjectReference{ -// Name: secretFrom.Name, -// }, -// }, -// }, -// }) -// -// // parse scripts -// scripts, err := getScriptContent(reqCtx, cli, ops.Spec.ScriptSpec) -// if err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// -// envs = append(envs, corev1.EnvVar{ -// Name: "KB_SCRIPT", -// Value: strings.Join(scripts, "\n"), -// }) -// -// jobCmdTpl, envVars, err := engineForJob.ExecuteCommand(scripts) -// if err != nil { -// 
return nil, intctrlutil.NewFatalError(err.Error()) -// } -// if envVars != nil { -// envs = append(envs, envVars...) -// } -// containerImg := viper.GetString(constant.KBDataScriptClientsImage) -// if len(ops.Spec.ScriptSpec.Image) != 0 { -// containerImg = ops.Spec.ScriptSpec.Image -// } -// if len(containerImg) == 0 { -// return nil, intctrlutil.NewFatalError("image is empty") -// } -// -// container := corev1.Container{ -// Name: "datascript", -// Image: containerImg, -// ImagePullPolicy: corev1.PullPolicy(viper.GetString(constant.KBImagePullPolicy)), -// Command: jobCmdTpl, -// Env: envs, -// } -// randomStr, _ := password.Generate(4, 0, 0, true, false) -// jobName := fmt.Sprintf("%s-%s-%s-%s", cluster.Name, "script", ops.Name, randomStr) -// if len(jobName) > 63 { -// jobName = strings.TrimSuffix(jobName[:63], "-") -// } -// -// job := &batchv1.Job{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: jobName, -// Namespace: cluster.Namespace, -// }, -// } -// intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) -// // set backoff limit to 0, so that the job will not be restarted -// job.Spec.BackoffLimit = pointer.Int32(0) -// job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever -// job.Spec.Template.Spec.Containers = []corev1.Container{container} -// job.Spec.Template.Spec.ImagePullSecrets = intctrlutil.BuildImagePullSecrets() -// -// // add labels -// job.Labels = getDataScriptJobLabels(cluster.Name, component.Name, ops.Name) -// // add tolerations -// schedulingPolicy, err := scheduling.BuildSchedulingPolicy(cluster, component) -// if err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// job.Spec.Template.Spec.Tolerations = schedulingPolicy.Tolerations -// // add owner reference -// scheme, _ := appsv1alpha1.SchemeBuilder.Build() -// if err := controllerutil.SetOwnerReference(ops, job, scheme); err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// return job, nil -// } -// -// // parse kb host -// var endpoint string -// var job *batchv1.Job -// -// jobs := make([]*batchv1.Job, 0) -// if ops.Spec.ScriptSpec.Selector == nil { -// if endpoint, err = getTargetService(reqCtx, cli, client.ObjectKeyFromObject(cluster), component.Name); err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// if job, err = buildJob(endpoint); err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// jobs = append(jobs, job) -// return jobs, nil -// } -// -// selector, err := metav1.LabelSelectorAsSelector(ops.Spec.ScriptSpec.Selector) -// if err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// -// pods := &corev1.PodList{} -// if err = cli.List(reqCtx.Ctx, pods, client.InNamespace(cluster.Namespace), -// client.MatchingLabels{ -// constant.AppInstanceLabelKey: cluster.Name, -// constant.KBAppComponentLabelKey: component.Name, -// }, -// client.MatchingLabelsSelector{Selector: selector}, -// ); err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } else if len(pods.Items) == 0 { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } -// -// for _, pod := range pods.Items { -// endpoint = pod.Status.PodIP -// if job, err = buildJob(endpoint); err != nil { -// return nil, intctrlutil.NewFatalError(err.Error()) -// } else { -// jobs = append(jobs, job) -// } -// } -// return jobs, nil -// } - -func getDataScriptJobLabels(cluster, component, request string) map[string]string { - return map[string]string{ - constant.AppInstanceLabelKey: cluster, - 
constant.KBAppComponentLabelKey: component, - constant.OpsRequestNameLabelKey: request, - constant.OpsRequestTypeLabelKey: string(appsv1alpha1.DataScriptType), - } -} diff --git a/controllers/apps/operations/datascript_test.go b/controllers/apps/operations/datascript_test.go deleted file mode 100644 index 1218c56ae3b..00000000000 --- a/controllers/apps/operations/datascript_test.go +++ /dev/null @@ -1,444 +0,0 @@ -/* -Copyright (C) 2022-2024 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package operations - -import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" - "github.com/apecloud/kubeblocks/pkg/generics" - testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" -) - -var _ = Describe("DataScriptOps", func() { - var ( - randomStr = testCtx.GetRandomStr() - compDefName = "test-compdef-" + randomStr - clusterName = "test-cluster-" + randomStr - - clusterObj *appsv1alpha1.Cluster - opsResource *OpsResource - reqCtx intctrlutil.RequestCtx - ) - - int32Ptr := func(i int32) *int32 { - return &i - } - - cleanEnv := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - - // delete cluster(and all dependent sub-resources), cluster definition - testapps.ClearClusterResourcesWithRemoveFinalizerOption(&testCtx) - - // delete rest resources - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced - testapps.ClearResources(&testCtx, generics.OpsRequestSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.SecretSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.ConfigMapSignature, inNS, ml) - testapps.ClearResources(&testCtx, generics.JobSignature, inNS, ml) - } - - BeforeEach(cleanEnv) - - AfterEach(cleanEnv) - - createClusterDatascriptOps := func(comp string, ttlBeforeAbort int32) *appsv1alpha1.OpsRequest { - opsName := "datascript-ops-" + testCtx.GetRandomStr() - ops := testapps.NewOpsRequestObj(opsName, testCtx.DefaultNamespace, - clusterObj.Name, appsv1alpha1.DataScriptType) - ops.Spec.ScriptSpec = &appsv1alpha1.ScriptSpec{ - ComponentOps: appsv1alpha1.ComponentOps{ComponentName: comp}, - Script: []string{"CREATE TABLE test (id INT);"}, - } - ops.Spec.PreConditionDeadlineSeconds = int32Ptr(ttlBeforeAbort) - Expect(testCtx.CreateObj(testCtx.Ctx, ops)).Should(Succeed()) - ops.Status.Phase = appsv1alpha1.OpsPendingPhase - return ops - } - - patchOpsPhase := func(opsKey client.ObjectKey, phase appsv1alpha1.OpsPhase) { - ops := &appsv1alpha1.OpsRequest{} - Eventually(func(g Gomega) { - g.Expect(k8sClient.Get(testCtx.Ctx, opsKey, ops)).Should(Succeed()) - g.Expect(testapps.ChangeObjStatus(&testCtx, ops, func() { - ops.Status.Phase = phase - })).Should(Succeed()) - }).Should(Succeed()) - } - - patchClusterStatus := func(phase appsv1alpha1.ClusterPhase) { - var compPhase appsv1alpha1.ClusterComponentPhase - switch phase { - case appsv1alpha1.RunningClusterPhase: - compPhase = appsv1alpha1.RunningClusterCompPhase - case appsv1alpha1.StoppedClusterPhase: - compPhase = appsv1alpha1.StoppedClusterCompPhase - case appsv1alpha1.FailedClusterPhase: - compPhase = appsv1alpha1.FailedClusterCompPhase - case appsv1alpha1.AbnormalClusterPhase: - compPhase = appsv1alpha1.AbnormalClusterCompPhase - case appsv1alpha1.CreatingClusterPhase: - compPhase = appsv1alpha1.CreatingClusterCompPhase - case appsv1alpha1.UpdatingClusterPhase: - compPhase = appsv1alpha1.UpdatingClusterCompPhase - } - - Expect(testapps.ChangeObjStatus(&testCtx, clusterObj, func() { - clusterObj.Status.Phase = phase - clusterObj.Status.Components = map[string]appsv1alpha1.ClusterComponentStatus{ - defaultCompName: { - Phase: compPhase, - }, - } - })).Should(Succeed()) - } - - Context("with Cluster which has MySQL ConsensusSet", func() { - BeforeEach(func() { - By("mock cluster") - _, _, clusterObj = initOperationsResources(compDefName, clusterName) - - By("init opsResource") - opsResource = &OpsResource{ - Cluster: clusterObj, - Recorder: k8sManager.GetEventRecorderFor("opsrequest-controller"), - } - - reqCtx = intctrlutil.RequestCtx{ - Ctx: testCtx.Ctx, - Log: logf.FromContext(testCtx.Ctx).WithValues("datascript", testCtx.DefaultNamespace), - } - }) - - AfterEach(func() { - By("clean resources") - inNS := client.InNamespace(testCtx.DefaultNamespace) - testapps.ClearResources(&testCtx, generics.ClusterSignature, inNS, client.HasLabels{testCtx.TestObjLabelKey}) - testapps.ClearResources(&testCtx, generics.ServiceSignature, inNS, client.HasLabels{testCtx.TestObjLabelKey}) - testapps.ClearResources(&testCtx, generics.OpsRequestSignature, inNS, client.HasLabels{testCtx.TestObjLabelKey}) - 
testapps.ClearResources(&testCtx, generics.ServiceSignature, inNS, client.HasLabels{testCtx.TestObjLabelKey}) - testapps.ClearResources(&testCtx, generics.JobSignature, inNS, client.HasLabels{testCtx.TestObjLabelKey}) - }) - - It("create a datascript ops with ttlSecondsBeforeAbort-0, abort immediately", func() { - By("patch cluster to creating") - patchClusterStatus(appsv1alpha1.CreatingClusterPhase) - - By("create a datascript ops with ttlSecondsBeforeAbort=0") - // create a datascript ops with ttlSecondsBeforeAbort=0 - ops := createClusterDatascriptOps(defaultCompName, 0) - opsKey := client.ObjectKeyFromObject(ops) - patchOpsPhase(opsKey, appsv1alpha1.OpsCreatingPhase) - Expect(k8sClient.Get(testCtx.Ctx, opsKey, ops)).Should(Succeed()) - opsResource.OpsRequest = ops - - reqCtx.Req = reconcile.Request{NamespacedName: opsKey} - By("check the opsRequest phase, should fail") - _, err := GetOpsManager().Do(reqCtx, k8sClient, opsResource) - Expect(err).ShouldNot(HaveOccurred()) - Expect(ops.Status.Phase).Should(Equal(appsv1alpha1.OpsFailedPhase)) - }) - - It("create a datascript ops with ttlSecondsBeforeAbort=100, should requeue request", func() { - By("patch cluster to creating") - patchClusterStatus(appsv1alpha1.CreatingClusterPhase) - - By("create a datascript ops with ttlSecondsBeforeAbort=100") - // create a datascript ops with ttlSecondsBeforeAbort=0 - ops := createClusterDatascriptOps(defaultCompName, 100) - opsKey := client.ObjectKeyFromObject(ops) - patchOpsPhase(opsKey, appsv1alpha1.OpsPendingPhase) - Expect(k8sClient.Get(testCtx.Ctx, opsKey, ops)).Should(Succeed()) - opsResource.OpsRequest = ops - prevOpsStatus := ops.Status.Phase - - reqCtx.Req = reconcile.Request{NamespacedName: opsKey} - By("check the opsRequest phase") - _, err := GetOpsManager().Do(reqCtx, k8sClient, opsResource) - Expect(err).Should(Succeed()) - Expect(ops.Status.Phase).Should(Equal(prevOpsStatus)) - }) - - // TODO(v1.0): depends on clusterDefinition and lorry? - // It("create a datascript ops on running cluster", func() { - // By("patch cluster to running") - // patchClusterStatus(appsv1alpha1.RunningClusterPhase) - // - // By("create a datascript ops with ttlSecondsBeforeAbort=0") - // ops := createClusterDatascriptOps(defaultCompName, 0) - // opsResource.OpsRequest = ops - // opsKey := client.ObjectKeyFromObject(ops) - // patchOpsPhase(opsKey, appsv1alpha1.OpsCreatingPhase) - // Expect(k8sClient.Get(testCtx.Ctx, opsKey, ops)).Should(Succeed()) - // opsResource.OpsRequest = ops - // - // reqCtx.Req = reconcile.Request{NamespacedName: opsKey} - // By("check the opsRequest phase, should fail, cause pod is missing") - // _, err := GetOpsManager().Do(reqCtx, k8sClient, opsResource) - // Expect(err).ShouldNot(HaveOccurred()) - // Expect(ops.Status.Phase).Should(Equal(appsv1alpha1.OpsFailedPhase)) - // }) - - // TODO(v1.0): depends on clusterDefinition and lorry? 
- // It("reconcile a datascript ops on running cluster, patch job to complete", func() { - // By("patch cluster to running") - // patchClusterStatus(appsv1alpha1.RunningClusterPhase) - // - // By("create a datascript ops with ttlSecondsBeforeAbort=0") - // ops := createClusterDatascriptOps(defaultCompName, 0) - // opsResource.OpsRequest = ops - // opsKey := client.ObjectKeyFromObject(ops) - // patchOpsPhase(opsKey, appsv1alpha1.OpsRunningPhase) - // Expect(k8sClient.Get(testCtx.Ctx, opsKey, ops)).Should(Succeed()) - // opsResource.OpsRequest = ops - // - // reqCtx.Req = reconcile.Request{NamespacedName: opsKey} - // By("mock a job, missing service, should fail") - // comp := clusterObj.Spec.GetComponentByName(defaultCompName) - // _, err := buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") - // Expect(err).Should(HaveOccurred()) - // - // By("mock a service, should pass") - // serviceName := fmt.Sprintf("%s-%s", clusterObj.Name, comp.Name) - // service := &corev1.Service{ - // ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: clusterObj.Namespace}, - // Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}}, - // } - // err = k8sClient.Create(testCtx.Ctx, service) - // Expect(err).Should(Succeed()) - // - // By("mock a job one more time, fail with missing secret") - // _, err = buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") - // Expect(err).Should(HaveOccurred()) - // Expect(err.Error()).Should(ContainSubstring("missing secret")) - // - // By("patch a secret name to ops, fail with missing secret") - // secretName := fmt.Sprintf("%s-%s", clusterObj.Name, comp.Name) - // patch := client.MergeFrom(ops.DeepCopy()) - // ops.Spec.ScriptSpec.Secret = &appsv1alpha1.ScriptSecret{ - // Name: secretName, - // PasswordKey: "password", - // UsernameKey: "username", - // } - // Expect(k8sClient.Patch(testCtx.Ctx, ops, patch)).Should(Succeed()) - // - // _, err = buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") - // Expect(err).Should(HaveOccurred()) - // Expect(err.Error()).Should(ContainSubstring(secretName)) - // - // By("mock a secret, should pass") - // secret := &corev1.Secret{ - // ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: clusterObj.Namespace}, - // Type: corev1.SecretTypeOpaque, - // Data: map[string][]byte{ - // "password": []byte("123456"), - // "username": []byte("hellocoffee"), - // }, - // } - // err = k8sClient.Create(testCtx.Ctx, secret) - // Expect(err).Should(Succeed()) - // - // By("create job, should pass") - // viper.Set(constant.KBDataScriptClientsImage, "apecloud/kubeblocks-clients:latest") - // jobs, err := buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") - // Expect(err).Should(Succeed()) - // job := jobs[0] - // Expect(k8sClient.Create(testCtx.Ctx, job)).Should(Succeed()) - // - // By("reconcile the opsRequest phase") - // _, err = GetOpsManager().Reconcile(reqCtx, k8sClient, opsResource) - // Expect(err).Should(Succeed()) - // Expect(ops.Status.Phase).Should(Equal(appsv1alpha1.OpsRunningPhase)) - // - // By("patch job to succeed") - // Eventually(func(g Gomega) { - // g.Expect(testapps.ChangeObjStatus(&testCtx, job, func() { - // job.Status.Succeeded = 1 - // job.Status.Conditions = append(job.Status.Conditions, - // batchv1.JobCondition{ - // Type: batchv1.JobComplete, - // Status: corev1.ConditionTrue, - // }) - // })) - // }).Should(Succeed()) - // - // _, err = GetOpsManager().Reconcile(reqCtx, k8sClient, opsResource) - // 
Expect(err).Should(Succeed()) - // Expect(ops.Status.Phase).Should(Equal(appsv1alpha1.OpsSucceedPhase)) - // - // Expect(k8sClient.Delete(testCtx.Ctx, service)).Should(Succeed()) - // Expect(k8sClient.Delete(testCtx.Ctx, job)).Should(Succeed()) - // Expect(k8sClient.Delete(testCtx.Ctx, secret)).Should(Succeed()) - // }) - - // TODO(v1.0): depends on clusterDefinition and lorry? - // It("reconcile a datascript ops on running cluster, patch job to failed", func() { - // By("patch cluster to running") - // patchClusterStatus(appsv1alpha1.RunningClusterPhase) - // - // By("create a datascript ops with ttlSecondsBeforeAbort=0") - // ops := createClusterDatascriptOps(defaultCompName, 0) - // opsResource.OpsRequest = ops - // opsKey := client.ObjectKeyFromObject(ops) - // patchOpsPhase(opsKey, appsv1alpha1.OpsRunningPhase) - // Expect(k8sClient.Get(testCtx.Ctx, opsKey, ops)).Should(Succeed()) - // opsResource.OpsRequest = ops - // - // reqCtx.Req = reconcile.Request{NamespacedName: opsKey} - // comp := clusterObj.Spec.GetComponentByName(defaultCompName) - // By("mock a service, should pass") - // serviceName := fmt.Sprintf("%s-%s", clusterObj.Name, comp.Name) - // service := &corev1.Service{ - // ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: clusterObj.Namespace}, - // Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}}, - // } - // err := k8sClient.Create(testCtx.Ctx, service) - // Expect(err).Should(Succeed()) - // - // By("patch a secret name to ops") - // secretName := fmt.Sprintf("%s-%s", clusterObj.Name, comp.Name) - // secret := &corev1.Secret{ - // ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: clusterObj.Namespace}, - // Type: corev1.SecretTypeOpaque, - // Data: map[string][]byte{ - // "password": []byte("123456"), - // "username": []byte("hellocoffee"), - // }, - // } - // patch := client.MergeFrom(ops.DeepCopy()) - // ops.Spec.ScriptSpec.Secret = &appsv1alpha1.ScriptSecret{ - // Name: secretName, - // PasswordKey: "password", - // UsernameKey: "username", - // } - // Expect(k8sClient.Patch(testCtx.Ctx, ops, patch)).Should(Succeed()) - // - // By("mock a secret, should pass") - // err = k8sClient.Create(testCtx.Ctx, secret) - // Expect(err).Should(Succeed()) - // - // By("create job, should pass") - // viper.Set(constant.KBDataScriptClientsImage, "apecloud/kubeblocks-clients:latest") - // jobs, err := buildDataScriptJobs(reqCtx, k8sClient, clusterObj, comp, ops, "mysql") - // Expect(err).Should(Succeed()) - // job := jobs[0] - // Expect(k8sClient.Create(testCtx.Ctx, job)).Should(Succeed()) - // - // By("reconcile the opsRequest phase") - // _, err = GetOpsManager().Reconcile(reqCtx, k8sClient, opsResource) - // Expect(err).Should(Succeed()) - // Expect(ops.Status.Phase).Should(Equal(appsv1alpha1.OpsRunningPhase)) - // - // By("patch job to failed") - // Eventually(func(g Gomega) { - // g.Expect(testapps.ChangeObjStatus(&testCtx, job, func() { - // job.Status.Succeeded = 1 - // job.Status.Conditions = append(job.Status.Conditions, - // batchv1.JobCondition{ - // Type: batchv1.JobFailed, - // Status: corev1.ConditionTrue, - // }) - // })) - // }).Should(Succeed()) - // - // _, err = GetOpsManager().Reconcile(reqCtx, k8sClient, opsResource) - // Expect(err).Should(Succeed()) - // Expect(ops.Status.Phase).Should(Equal(appsv1alpha1.OpsFailedPhase)) - // - // Expect(k8sClient.Delete(testCtx.Ctx, service)).Should(Succeed()) - // Expect(k8sClient.Delete(testCtx.Ctx, job)).Should(Succeed()) - // Expect(k8sClient.Delete(testCtx.Ctx, 
-		// })
-
-		It("parse script from spec", func() {
-			cmName := "test-configmap"
-			secretName := "test-secret"
-
-			opsName := "datascript-ops-" + testCtx.GetRandomStr()
-			ops := testapps.NewOpsRequestObj(opsName, testCtx.DefaultNamespace,
-				clusterObj.Name, appsv1alpha1.DataScriptType)
-			ops.Spec.ScriptSpec = &appsv1alpha1.ScriptSpec{
-				ComponentOps: appsv1alpha1.ComponentOps{ComponentName: defaultCompName},
-				Script:       []string{"CREATE TABLE test (id INT);"},
-				ScriptFrom: &appsv1alpha1.ScriptFrom{
-					ConfigMapRef: []corev1.ConfigMapKeySelector{
-						{
-							Key:                  "cm-key",
-							LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
-						},
-					},
-					SecretRef: []corev1.SecretKeySelector{
-						{
-							Key:                  "secret-key",
-							LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
-						},
-					},
-				},
-			}
-			reqCtx.Req = reconcile.Request{NamespacedName: client.ObjectKeyFromObject(ops)}
-			_, err := getScriptContent(reqCtx, k8sClient, ops.Spec.ScriptSpec)
-			Expect(err).Should(HaveOccurred())
-
-			// create configmap
-			configMap := &corev1.ConfigMap{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      cmName,
-					Namespace: testCtx.DefaultNamespace,
-				},
-				Data: map[string]string{
-					"cm-key": "CREATE TABLE t1 (id INT);",
-				},
-			}
-
-			Expect(k8sClient.Create(testCtx.Ctx, configMap)).Should(Succeed())
-			_, err = getScriptContent(reqCtx, k8sClient, ops.Spec.ScriptSpec)
-			Expect(err).Should(HaveOccurred())
-
-			// create configmap
-			secret := &corev1.Secret{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      secretName,
-					Namespace: testCtx.DefaultNamespace,
-				},
-				StringData: map[string]string{
-					"secret-key": "CREATE TABLE t1 (id INT);",
-				},
-			}
-			Expect(k8sClient.Create(testCtx.Ctx, secret)).Should(Succeed())
-			_, err = getScriptContent(reqCtx, k8sClient, ops.Spec.ScriptSpec)
-			Expect(err).Should(Succeed())
-		})
-	})
-})
diff --git a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml
index 5551e540d88..ac1191980e8 100644
--- a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml
+++ b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml
@@ -4476,207 +4476,6 @@ spec:
                required:
                - backupName
                type: object
-          scriptSpec:
-            description: |-
-              Specifies the image and scripts for executing engine-specific operations such as creating databases or users.
-              It supports limited engines including MySQL, PostgreSQL, Redis, MongoDB.
-
-
-              ScriptSpec has been replaced by the more versatile OpsDefinition.
-              It is recommended to use OpsDefinition instead.
-              ScriptSpec is deprecated and will be removed in a future version.
-            properties:
-              componentName:
-                description: Specifies the name of the Component.
-                type: string
-              image:
-                description: |-
-                  Specifies the image to be used to execute scripts.
-
-
-                  By default, the image "apecloud/kubeblocks-datascript:latest" is used.
-                type: string
-              script:
-                description: |-
-                  Defines the content of scripts to be executed.
-
-
-                  All scripts specified in this field will be executed in the order they are provided.
-
-
-                  Note: this field cannot be modified once set.
-                items:
-                  type: string
-                type: array
-                x-kubernetes-validations:
-                - message: forbidden to update spec.scriptSpec.script
-                  rule: self == oldSelf
-              scriptFrom:
-                description: |-
-                  Specifies the sources of the scripts to be executed.
-                  Each script can be imported either from a ConfigMap or a Secret.
-
-
-                  All scripts obtained from the sources specified in this field will be executed after
-                  any scripts provided in the `script` field.
-
-
-                  Execution order:
-                  1. Scripts provided in the `script` field, in the order of the scripts listed.
-                  2. Scripts imported from ConfigMaps, in the order of the sources listed.
-                  3. Scripts imported from Secrets, in the order of the sources listed.
-
-
-                  Note: this field cannot be modified once set.
-                properties:
-                  configMapRef:
-                    description: |-
-                      A list of ConfigMapKeySelector objects, each specifies a ConfigMap and a key containing the script.
-
-
-                      Note: This field cannot be modified once set.
-                    items:
-                      description: Selects a key from a ConfigMap.
-                      properties:
-                        key:
-                          description: The key to select.
-                          type: string
-                        name:
-                          description: |-
-                            Name of the referent.
-                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                            TODO: Add other useful fields. apiVersion, kind, uid?
-                          type: string
-                        optional:
-                          description: Specify whether the ConfigMap or its key
-                            must be defined
-                          type: boolean
-                      required:
-                      - key
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                    x-kubernetes-validations:
-                    - message: forbidden to update spec.scriptSpec.scriptFrom.configMapRef
-                      rule: self == oldSelf
-                  secretRef:
-                    description: |-
-                      A list of SecretKeySelector objects, each specifies a Secret and a key containing the script.
-
-
-                      Note: This field cannot be modified once set.
-                    items:
-                      description: SecretKeySelector selects a key of a Secret.
-                      properties:
-                        key:
-                          description: The key of the secret to select from. Must
-                            be a valid secret key.
-                          type: string
-                        name:
-                          description: |-
-                            Name of the referent.
-                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
-                            TODO: Add other useful fields. apiVersion, kind, uid?
-                          type: string
-                        optional:
-                          description: Specify whether the Secret or its key must
-                            be defined
-                          type: boolean
-                      required:
-                      - key
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                    x-kubernetes-validations:
-                    - message: forbidden to update spec.scriptSpec.scriptFrom.secretRef
-                      rule: self == oldSelf
-                type: object
-                x-kubernetes-validations:
-                - message: forbidden to update spec.scriptSpec.scriptFrom
-                  rule: self == oldSelf
-              secret:
-                description: Defines the secret to be used to execute the script.
-                  If not specified, the default cluster root credential secret
-                  is used.
-                properties:
-                  name:
-                    description: Specifies the name of the secret.
-                    maxLength: 63
-                    pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$
-                    type: string
-                  passwordKey:
-                    default: password
-                    description: Used to specify the password part of the secret.
-                    type: string
-                  usernameKey:
-                    default: username
-                    description: Used to specify the username part of the secret.
-                    type: string
-                required:
-                - name
-                type: object
-              selector:
-                description: |-
-                  Specifies the labels used to select the Pods on which the script should be executed.
-
-
-                  By default, the script is executed on the Pod associated with the service named "{clusterName}-{componentName}",
-                  which typically routes to the Pod with the primary/leader role.
-
-
-                  However, some Components, such as Redis, do not synchronize account information between primary and secondary Pods.
-                  In these cases, the script must be executed on all replica Pods matching the selector.
-
-
-                  Note: this field cannot be modified once set.
-                properties:
-                  matchExpressions:
-                    description: matchExpressions is a list of label selector
-                      requirements. The requirements are ANDed.
-                    items:
-                      description: |-
-                        A label selector requirement is a selector that contains values, a key, and an operator that
-                        relates the key and values.
-                      properties:
-                        key:
-                          description: key is the label key that the selector
-                            applies to.
-                          type: string
-                        operator:
-                          description: |-
-                            operator represents a key's relationship to a set of values.
-                            Valid operators are In, NotIn, Exists and DoesNotExist.
-                          type: string
-                        values:
-                          description: |-
-                            values is an array of string values. If the operator is In or NotIn,
-                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                            the values array must be empty. This array is replaced during a strategic
-                            merge patch.
-                          items:
-                            type: string
-                          type: array
-                      required:
-                      - key
-                      - operator
-                      type: object
-                    type: array
-                  matchLabels:
-                    additionalProperties:
-                      type: string
-                    description: |-
-                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                      map is equivalent to an element of matchExpressions, whose key field is "key", the
-                      operator is "In", and the values array contains only "value". The requirements are ANDed.
-                    type: object
-                type: object
-                x-kubernetes-map-type: atomic
-                x-kubernetes-validations:
-                - message: forbidden to update spec.scriptSpec.script.selector
-                  rule: self == oldSelf
-            required:
-            - componentName
-            type: object
           switchover:
             description: Lists Switchover objects, each specifying a Component
               to perform the switchover operation.
@@ -4739,7 +4538,7 @@
             description: |-
               Specifies the type of this operation. Supported types include "Start", "Stop", "Restart", "Switchover", "VerticalScaling", "HorizontalScaling", "VolumeExpansion", "Reconfiguring", "Upgrade", "Backup", "Restore",
-              "Expose", "DataScript", "RebuildInstance", "Custom".
+              "Expose", "RebuildInstance", "Custom".

               Note: This field is immutable once set.

@@ -4754,7 +4553,6 @@
            - Stop
            - Expose
            - Switchover
-            - DataScript
            - Backup
            - Restore
            - RebuildInstance
@@ -5207,7 +5005,7 @@ spec:
              Describes the detailed status of the OpsRequest. Possible condition types include "Cancelled", "WaitForProgressing", "Validated", "Succeed", "Failed", "Restarting", "VerticalScaling", "HorizontalScaling", "VolumeExpanding", "Reconfigure", "Switchover", "Stopping", "Starting",
-              "VersionUpgrading", "Exposing", "ExecuteDataScript", "Backup", "InstancesRebuilding", "CustomOperation".
+              "VersionUpgrading", "Exposing", "Backup", "InstancesRebuilding", "CustomOperation".
items: description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for diff --git a/deploy/helm/templates/deployment.yaml b/deploy/helm/templates/deployment.yaml index 7141e5884e2..ea7f28bbdb6 100644 --- a/deploy/helm/templates/deployment.yaml +++ b/deploy/helm/templates/deployment.yaml @@ -47,13 +47,6 @@ spec: {{- toYaml .Values.resources | nindent 12 }} command: - /bin/true - - name: datascript - image: "{{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.datascript.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - command: - - /bin/true containers: - name: manager args: @@ -120,8 +113,6 @@ spec: value: {{ .Values.image.imagePullSecrets | toJson | quote }} - name: KUBEBLOCKS_TOOLS_IMAGE value: "{{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.tools.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - - name: KUBEBLOCKS_DATASCRIPT_CLIENTS_IMAGE - value: "{{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.datascript.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - name: KUBEBLOCKS_SERVICEACCOUNT_NAME value: {{ include "kubeblocks.serviceAccountName" . }} {{- if .Capabilities.APIVersions.Has "snapshot.storage.k8s.io/v1" }} diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml index 04003b1dd68..50cf91afc22 100644 --- a/deploy/helm/values.yaml +++ b/deploy/helm/values.yaml @@ -20,8 +20,6 @@ image: imagePullSecrets: [] tools: repository: apecloud/kubeblocks-tools - datascript: - repository: apecloud/kubeblocks-datascript ## @param replicaCount ## diff --git a/docker/Dockerfile-datascript b/docker/Dockerfile-datascript deleted file mode 100644 index da47d704d20..00000000000 --- a/docker/Dockerfile-datascript +++ /dev/null @@ -1,19 +0,0 @@ -# Build client images for mysql and postgres to support datascripts -# The latest release to date is 3.18) as of 20230625 -FROM docker.io/alpine:3.18 as dist -# ARG APK_MIRROR - -# install tools via apk -ENV APK_MIRROR=${APK_MIRROR} -RUN if [ -n "${APK_MIRROR}" ]; then sed -i "s/dl-cdn.alpinelinux.org/${APK_MIRROR}/g" /etc/apk/repositories; fi - -RUN apk add --no-cache jq --allow-untrusted - -RUN apk add --no-cache postgresql-client --allow-untrusted - -RUN apk add --no-cache mysql-client mariadb-connector-c --allow-untrusted - -RUN apk add redis -RUN rm -rf /var/cache/apk/* - -USER 65532:65532 diff --git a/docker/docker.mk b/docker/docker.mk index 004cfff1833..80c02bf5836 100644 --- a/docker/docker.mk +++ b/docker/docker.mk @@ -36,7 +36,6 @@ TOOL_IMG ?= docker.io/apecloud/$(APP_NAME)-tools CLI_IMG ?= docker.io/apecloud/kbcli CHARTS_IMG ?= docker.io/apecloud/$(APP_NAME)-charts CLI_TAG ?= v$(CLI_VERSION) -DATASCRIPT_IMG ?= docker.io/apecloud/$(APP_NAME)-datascript DATAPROTECTION_IMG ?= docker.io/apecloud/$(APP_NAME)-dataprotection # Update whenever you upgrade dev container image @@ -159,34 +158,6 @@ else endif endif -.PHONY: build-datascript-image -build-datascript-image: install-docker-buildx ## Build datascript container image. -ifneq ($(BUILDX_ENABLED), true) - $(DOCKER) build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile-datascript --tag ${DATASCRIPT_IMG}:${VERSION} --tag ${DATASCRIPT_IMG}:latest -else -ifeq ($(TAG_LATEST), true) - $(DOCKER) buildx build . 
-else
-	$(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile-datascript --platform $(BUILDX_PLATFORMS) --tag ${DATASCRIPT_IMG}:${VERSION}
-endif
-endif
-
-.PHONY: push-datascript-image
-push-datascript-image: install-docker-buildx ## Push datascript container image.
-ifneq ($(BUILDX_ENABLED), true)
-ifeq ($(TAG_LATEST), true)
-	$(DOCKER) push ${DATASCRIPT_IMG}:latest
-else
-	$(DOCKER) push ${DATASCRIPT_IMG}:${VERSION}
-endif
-else
-ifeq ($(TAG_LATEST), true)
-	$(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile-datascript --platform $(BUILDX_PLATFORMS) --tag ${DATASCRIPT_IMG}:latest --push
-else
-	$(DOCKER) buildx build . $(DOCKER_BUILD_ARGS) --file $(DOCKERFILE_DIR)/Dockerfile-datascript --platform $(BUILDX_PLATFORMS) --tag ${DATASCRIPT_IMG}:${VERSION} --push
-endif
-endif
-
 .PHONY: build-dataprotection-image
 build-dataprotection-image: install-docker-buildx generate ## Build Operator dataprotection container image.
 ifneq ($(BUILDX_ENABLED), true)
diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md
index 2fdc2ae51b9..f3be628b65e 100644
--- a/docs/developer_docs/api-reference/cluster.md
+++ b/docs/developer_docs/api-reference/cluster.md
@@ -2547,7 +2547,7 @@ OpsType

Specifies the type of this operation. Supported types include “Start”, “Stop”, “Restart”, “Switchover”, “VerticalScaling”, “HorizontalScaling”, “VolumeExpansion”, “Reconfiguring”, “Upgrade”, “Backup”, “Restore”,
-“Expose”, “DataScript”, “RebuildInstance”, “Custom”.
+“Expose”, “RebuildInstance”, “Custom”.

Note: This field is immutable once set.

@@ -7102,7 +7102,7 @@ and other administrative tasks.

ComponentOps

-(Appears on:CustomOpsComponent, HorizontalScaling, RebuildInstance, Reconfigure, ScriptSpec, SpecificOpsRequest, Switchover, UpgradeComponent, VerticalScaling, VolumeExpansion)
+(Appears on:CustomOpsComponent, HorizontalScaling, RebuildInstance, Reconfigure, SpecificOpsRequest, Switchover, UpgradeComponent, VerticalScaling, VolumeExpansion)

ComponentOps specifies the Component to be operated on.

@@ -12248,7 +12248,7 @@ OpsType

Specifies the type of this operation. Supported types include “Start”, “Stop”, “Restart”, “Switchover”, “VerticalScaling”, “HorizontalScaling”, “VolumeExpansion”, “Reconfiguring”, “Upgrade”, “Backup”, “Restore”,
-“Expose”, “DataScript”, “RebuildInstance”, “Custom”.
+“Expose”, “RebuildInstance”, “Custom”.

Note: This field is immutable once set.

@@ -12501,7 +12501,7 @@ map[string]*github.com/apecloud/kubeblocks/apis/apps/v1alpha1.ReconfiguringStatu

Describes the detailed status of the OpsRequest. Possible condition types include “Cancelled”, “WaitForProgressing”, “Validated”, “Succeed”, “Failed”, “Restarting”, “VerticalScaling”, “HorizontalScaling”, “VolumeExpanding”, “Reconfigure”, “Switchover”, “Stopping”, “Starting”,
-“VersionUpgrading”, “Exposing”, “ExecuteDataScript”, “Backup”, “InstancesRebuilding”, “CustomOperation”.
+“VersionUpgrading”, “Exposing”, “Backup”, “InstancesRebuilding”, “CustomOperation”.

@@ -12801,13 +12801,10 @@ If the cluster is not configured for dual-stack, the Service creation fails.

"Backup"

-DataScriptType the data script operation will execute the data script against the cluster.
+

"Custom"

RebuildInstance rebuilding an instance is very useful when a node is offline or an instance is unrecoverable.

-"DataScript"
-
"Expose"

StartType the start operation will start the pods which is deleted in stop operation.

@@ -15175,226 +15172,6 @@ All topologySpreadConstraints are ANDed.

-ScriptFrom
-
-(Appears on:ScriptSpec)
-
-ScriptFrom specifies the source of the script to be executed, which can be either a ConfigMap or a Secret.
-
-Field | Description
-
-configMapRef
-[]Kubernetes core/v1.ConfigMapKeySelector
-(Optional)
-A list of ConfigMapKeySelector objects, each specifies a ConfigMap and a key containing the script.
-
-Note: This field cannot be modified once set.
-
-secretRef
-[]Kubernetes core/v1.SecretKeySelector
-(Optional)
-A list of SecretKeySelector objects, each specifies a Secret and a key containing the script.
-
-Note: This field cannot be modified once set.
-
-ScriptSecret
-
-(Appears on:ScriptSpec)
-
-ScriptSecret represents the secret that is used to execute the script.
-
-Field | Description
-
-name
-string
-Specifies the name of the secret.
-
-usernameKey
-string
-(Optional)
-Used to specify the username part of the secret.
-
-passwordKey
-string
-(Optional)
-Used to specify the password part of the secret.
-
-ScriptSpec
-
-(Appears on:SpecificOpsRequest)
-
-ScriptSpec is a legacy feature for executing engine-specific operations such as creating databases or users.
-It supports limited engines including MySQL, PostgreSQL, Redis, MongoDB.
-
-ScriptSpec has been replaced by the more versatile OpsDefinition.
-It is recommended to use OpsDefinition instead. ScriptSpec is deprecated and will be removed in a future version.
-
-Field | Description
-
-ComponentOps
-ComponentOps
-(Members of ComponentOps are embedded into this type.)
-Specifies the name of the Component.
-
-image
-string
-(Optional)
-Specifies the image to be used to execute scripts.
-
-By default, the image “apecloud/kubeblocks-datascript:latest” is used.
-
-secret
-ScriptSecret
-(Optional)
-Defines the secret to be used to execute the script. If not specified, the default cluster root credential secret is used.
-
-script
-[]string
-(Optional)
-Defines the content of scripts to be executed.
-
-All scripts specified in this field will be executed in the order they are provided.
-
-Note: this field cannot be modified once set.
-
-scriptFrom
-ScriptFrom
-(Optional)
-Specifies the sources of the scripts to be executed.
-Each script can be imported either from a ConfigMap or a Secret.
-
-All scripts obtained from the sources specified in this field will be executed after
-any scripts provided in the script field.
-
-Execution order:
-1. Scripts provided in the script field, in the order of the scripts listed.
-2. Scripts imported from ConfigMaps, in the order of the sources listed.
-3. Scripts imported from Secrets, in the order of the sources listed.
-
-Note: this field cannot be modified once set.
-
-selector
-Kubernetes meta/v1.LabelSelector
-(Optional)
-Specifies the labels used to select the Pods on which the script should be executed.
-
-By default, the script is executed on the Pod associated with the service named “{clusterName}-{componentName}”,
-which typically routes to the Pod with the primary/leader role.
-
-However, some Components, such as Redis, do not synchronize account information between primary and secondary Pods.
-In these cases, the script must be executed on all replica Pods matching the selector.
-
-Note: this field cannot be modified once set.

SecretRef

@@ -16937,24 +16714,6 @@ Reconfigure

-scriptSpec
-ScriptSpec
-(Optional)
-Specifies the image and scripts for executing engine-specific operations such as creating databases or users.
-It supports limited engines including MySQL, PostgreSQL, Redis, MongoDB.
-
-ScriptSpec has been replaced by the more versatile OpsDefinition.
-It is recommended to use OpsDefinition instead.
-ScriptSpec is deprecated and will be removed in a future version.

backup
diff --git a/pkg/constant/const.go b/pkg/constant/const.go
index 7173f9dfd28..7eb5339c16c 100644
--- a/pkg/constant/const.go
+++ b/pkg/constant/const.go
@@ -34,11 +34,10 @@ const (
 )

 const (
-	KBServiceAccountName     = "KUBEBLOCKS_SERVICEACCOUNT_NAME"
-	KBToolsImage             = "KUBEBLOCKS_TOOLS_IMAGE"
-	KBImagePullPolicy        = "KUBEBLOCKS_IMAGE_PULL_POLICY"
-	KBImagePullSecrets       = "KUBEBLOCKS_IMAGE_PULL_SECRETS"
-	KBDataScriptClientsImage = "KUBEBLOCKS_DATASCRIPT_CLIENTS_IMAGE"
+	KBServiceAccountName = "KUBEBLOCKS_SERVICEACCOUNT_NAME"
+	KBToolsImage         = "KUBEBLOCKS_TOOLS_IMAGE"
+	KBImagePullPolicy    = "KUBEBLOCKS_IMAGE_PULL_POLICY"
+	KBImagePullSecrets   = "KUBEBLOCKS_IMAGE_PULL_SECRETS"
 )

 const (