diff --git a/api/v1alpha1/pipeline_types.go b/api/v1alpha1/pipeline_types.go index 11db517d..e715c906 100644 --- a/api/v1alpha1/pipeline_types.go +++ b/api/v1alpha1/pipeline_types.go @@ -17,8 +17,25 @@ limitations under the License. package v1alpha1 import ( + "encoding/json" + "fmt" + "github.com/syntasso/kratix/lib/objectutil" + "os" + "strings" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + "github.com/syntasso/kratix/lib/hash" + "gopkg.in/yaml.v2" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) // PipelineSpec defines the desired state of Pipeline @@ -48,3 +65,536 @@ type Pipeline struct { Spec PipelineSpec `json:"spec,omitempty"` } + +type PipelineFactory struct { + ID string + Promise *Promise + Pipeline *Pipeline + Namespace string + ResourceRequest *unstructured.Unstructured + ResourceWorkflow bool + WorkflowAction Action + WorkflowType Type +} + +// +kubebuilder:object:generate=false +type PipelineJobResources struct { + Name string + Job *batchv1.Job + RequiredResources []client.Object +} + +const ( + kratixActionEnvVar = "KRATIX_WORKFLOW_ACTION" + kratixTypeEnvVar = "KRATIX_WORKFLOW_TYPE" + kratixPromiseEnvVar = "KRATIX_PROMISE_NAME" +) + +func PipelinesFromUnstructured(pipelines []unstructured.Unstructured, logger logr.Logger) ([]Pipeline, error) { + if len(pipelines) == 0 { + return nil, nil + } + + var ps []Pipeline + for _, pipeline := range pipelines { + pipelineLogger := logger.WithValues( + "pipelineKind", pipeline.GetKind(), + "pipelineVersion", pipeline.GetAPIVersion(), + "pipelineName", pipeline.GetName()) + + if pipeline.GetKind() == "Pipeline" && pipeline.GetAPIVersion() == "platform.kratix.io/v1alpha1" { + jsonPipeline, err := pipeline.MarshalJSON() + if err != nil { + pipelineLogger.Error(err, "Failed marshalling pipeline to json") + return nil, err + } + + p := Pipeline{} + err = json.Unmarshal(jsonPipeline, &p) + if err != nil { + pipelineLogger.Error(err, "Failed unmarshalling pipeline") + return nil, err + } + ps = append(ps, p) + } else { + return nil, fmt.Errorf("unsupported pipeline %q (%s.%s)", + pipeline.GetName(), pipeline.GetKind(), pipeline.GetAPIVersion()) + } + } + return ps, nil +} + +func (p *Pipeline) ForPromise(promise *Promise, action Action) *PipelineFactory { + return &PipelineFactory{ + ID: promise.GetName() + "-promise-pipeline", + Promise: promise, + Pipeline: p, + Namespace: SystemNamespace, + WorkflowType: WorkflowTypePromise, + WorkflowAction: action, + } +} + +func (p *Pipeline) ForResource(promise *Promise, action Action, resourceRequest *unstructured.Unstructured) *PipelineFactory { + return &PipelineFactory{ + ID: promise.GetName() + "-resource-pipeline", + Promise: promise, + Pipeline: p, + ResourceRequest: resourceRequest, + Namespace: resourceRequest.GetNamespace(), + ResourceWorkflow: true, + WorkflowType: WorkflowTypeResource, + WorkflowAction: action, + } +} + +func (p *PipelineFactory) Resources(jobEnv []corev1.EnvVar) (PipelineJobResources, error) { + wgScheduling := p.Promise.GetWorkloadGroupScheduling() + schedulingConfigMap, err := p.ConfigMap(wgScheduling) + if err != nil { + return PipelineJobResources{}, err + } + + serviceAccount := p.ServiceAccount() + + role, err := p.ObjectRole() + 
if err != nil { + return PipelineJobResources{}, err + } + roleBinding := p.ObjectRoleBinding(role.GetName(), serviceAccount) + + job, err := p.PipelineJob(schedulingConfigMap, serviceAccount, jobEnv) + if err != nil { + return PipelineJobResources{}, err + } + + requiredResources := []client.Object{serviceAccount, role, roleBinding} + if p.WorkflowAction == WorkflowActionConfigure { + requiredResources = append(requiredResources, schedulingConfigMap) + } + + return PipelineJobResources{ + Name: p.Pipeline.GetName(), + Job: job, + RequiredResources: requiredResources, + }, nil +} + +func (p *PipelineFactory) ServiceAccount() *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.ID, + Namespace: p.Namespace, + Labels: PromiseLabels(p.Promise), + }, + } +} + +func (p *PipelineFactory) ObjectRole() (client.Object, error) { + if p.ResourceWorkflow { + return p.role() + } + return p.clusterRole(), nil +} + +func (p *PipelineFactory) ObjectRoleBinding(roleName string, serviceAccount *corev1.ServiceAccount) client.Object { + if p.ResourceWorkflow { + return p.roleBinding(roleName, serviceAccount) + } + return p.clusterRoleBinding(roleName, serviceAccount) +} + +func (p *PipelineFactory) ConfigMap(workloadGroupScheduling []WorkloadGroupScheduling) (*corev1.ConfigMap, error) { + schedulingYAML, err := yaml.Marshal(workloadGroupScheduling) + if err != nil { + return nil, errors.Wrap(err, "error marshalling destinationSelectors to yaml") + } + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "destination-selectors-" + p.Promise.GetName(), + Namespace: p.Namespace, + Labels: PromiseLabels(p.Promise), + }, + Data: map[string]string{ + "destinationSelectors": string(schedulingYAML), + }, + }, nil +} + +func (p *PipelineFactory) DefaultVolumes(schedulingConfigMap *corev1.ConfigMap) []corev1.Volume { + return []corev1.Volume{ + { + Name: "promise-scheduling", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: schedulingConfigMap.GetName(), + }, + Items: []corev1.KeyToPath{{ + Key: "destinationSelectors", + Path: "promise-scheduling", + }}, + }, + }, + }, + } +} + +func (p *PipelineFactory) DefaultPipelineVolumes() ([]corev1.Volume, []corev1.VolumeMount) { + volumes := []corev1.Volume{ + {Name: "shared-input", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "shared-output", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "shared-metadata", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + } + volumeMounts := []corev1.VolumeMount{ + {MountPath: "/kratix/input", Name: "shared-input", ReadOnly: true}, + {MountPath: "/kratix/output", Name: "shared-output"}, + {MountPath: "/kratix/metadata", Name: "shared-metadata"}, + } + return volumes, volumeMounts +} + +func (p *PipelineFactory) DefaultEnvVars() []corev1.EnvVar { + return []corev1.EnvVar{ + {Name: kratixActionEnvVar, Value: string(p.WorkflowAction)}, + {Name: kratixTypeEnvVar, Value: string(p.WorkflowType)}, + {Name: kratixPromiseEnvVar, Value: p.Promise.GetName()}, + } +} + +func (p *PipelineFactory) ReaderContainer() corev1.Container { + kind := p.Promise.GroupVersionKind().Kind + group := p.Promise.GroupVersionKind().Group + name := p.Promise.GetName() + + if p.ResourceWorkflow { + kind = p.ResourceRequest.GetKind() + group = p.ResourceRequest.GroupVersionKind().Group + name = 
p.ResourceRequest.GetName() + } + + return corev1.Container{ + Name: "reader", + Image: os.Getenv("WC_IMG"), + Command: []string{"sh", "-c", "reader"}, + Env: []corev1.EnvVar{ + {Name: "OBJECT_KIND", Value: strings.ToLower(kind)}, + {Name: "OBJECT_GROUP", Value: group}, + {Name: "OBJECT_NAME", Value: name}, + {Name: "OBJECT_NAMESPACE", Value: p.Namespace}, + {Name: "KRATIX_WORKFLOW_TYPE", Value: string(p.WorkflowType)}, + }, + VolumeMounts: []corev1.VolumeMount{ + {MountPath: "/kratix/input", Name: "shared-input"}, + {MountPath: "/kratix/output", Name: "shared-output"}, + }, + } +} + +func (p *PipelineFactory) WorkCreatorContainer() corev1.Container { + workCreatorCommand := "./work-creator" + + args := []string{ + "-input-directory", "/work-creator-files", + "-promise-name", p.Promise.GetName(), + "-pipeline-name", p.Pipeline.GetName(), + "-namespace", p.Namespace, + "-workflow-type", string(p.WorkflowType), + } + + if p.ResourceWorkflow { + args = append(args, "-resource-name", p.ResourceRequest.GetName()) + } + + workCreatorCommand = fmt.Sprintf("%s %s", workCreatorCommand, strings.Join(args, " ")) + + return corev1.Container{ + Name: "work-writer", + Image: os.Getenv("WC_IMG"), + Command: []string{"sh", "-c", workCreatorCommand}, + VolumeMounts: []corev1.VolumeMount{ + {MountPath: "/work-creator-files/input", Name: "shared-output"}, + {MountPath: "/work-creator-files/metadata", Name: "shared-metadata"}, + {MountPath: "/work-creator-files/kratix-system", Name: "promise-scheduling"}, // this volumemount is a configmap + }, + } +} + +func (p *PipelineFactory) PipelineContainers() ([]corev1.Container, []corev1.Volume) { + volumes, defaultVolumeMounts := p.DefaultPipelineVolumes() + pipeline := p.Pipeline + if len(pipeline.Spec.Volumes) > 0 { + volumes = append(volumes, pipeline.Spec.Volumes...) + } + + var containers []corev1.Container + kratixEnvVars := p.DefaultEnvVars() + for _, c := range pipeline.Spec.Containers { + containerVolumeMounts := append(defaultVolumeMounts, c.VolumeMounts...) + + containers = append(containers, corev1.Container{ + Name: c.Name, + Image: c.Image, + VolumeMounts: containerVolumeMounts, + Args: c.Args, + Command: c.Command, + Env: append(kratixEnvVars, c.Env...), + EnvFrom: c.EnvFrom, + ImagePullPolicy: c.ImagePullPolicy, + }) + } + + return containers, volumes +} + +func (p *PipelineFactory) PipelineJob(schedulingConfigMap *corev1.ConfigMap, serviceAccount *corev1.ServiceAccount, env []corev1.EnvVar) (*batchv1.Job, error) { + obj, objHash, err := p.getObjAndHash() + if err != nil { + return nil, err + } + + var imagePullSecrets []corev1.LocalObjectReference + workCreatorPullSecrets := os.Getenv("WC_PULL_SECRET") + if workCreatorPullSecrets != "" { + imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{Name: workCreatorPullSecrets}) + } + + imagePullSecrets = append(imagePullSecrets, p.Pipeline.Spec.ImagePullSecrets...) + + readerContainer := p.ReaderContainer() + pipelineContainers, pipelineVolumes := p.PipelineContainers() + workCreatorContainer := p.WorkCreatorContainer() + statusWriterContainer := p.StatusWriterContainer(obj, env) + + volumes := append(p.DefaultVolumes(schedulingConfigMap), pipelineVolumes...) + + var initContainers []corev1.Container + var containers []corev1.Container + + initContainers = []corev1.Container{readerContainer} + if p.WorkflowAction == WorkflowActionDelete { + initContainers = append(initContainers, pipelineContainers[0:len(pipelineContainers)-1]...) 
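+		// delete workflows have no work-creator or status-writer stage, so every
+		// user-defined container except the last runs as an init container and the
+		// last one becomes the Job's main container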
+ containers = []corev1.Container{pipelineContainers[len(pipelineContainers)-1]} + } else { + initContainers = append(initContainers, pipelineContainers...) + initContainers = append(initContainers, workCreatorContainer) + containers = []corev1.Container{statusWriterContainer} + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.pipelineJobName(), + Namespace: p.Namespace, + Labels: p.pipelineJobLabels(objHash), + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: p.pipelineJobLabels(objHash), + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: serviceAccount.GetName(), + Containers: containers, + ImagePullSecrets: imagePullSecrets, + InitContainers: initContainers, + Volumes: volumes, + }, + }, + }, + } + + if err := controllerutil.SetControllerReference(obj, job, scheme.Scheme); err != nil { + return nil, err + } + return job, nil +} + +func (p *PipelineFactory) StatusWriterContainer(obj *unstructured.Unstructured, env []corev1.EnvVar) corev1.Container { + return corev1.Container{ + Name: "status-writer", + Image: os.Getenv("WC_IMG"), + Command: []string{"sh", "-c", "update-status"}, + Env: append(env, + corev1.EnvVar{Name: "OBJECT_KIND", Value: strings.ToLower(obj.GetKind())}, + corev1.EnvVar{Name: "OBJECT_GROUP", Value: obj.GroupVersionKind().Group}, + corev1.EnvVar{Name: "OBJECT_NAME", Value: obj.GetName()}, + corev1.EnvVar{Name: "OBJECT_NAMESPACE", Value: p.Namespace}, + ), + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/work-creator-files/metadata", + Name: "shared-metadata", + }}, + } +} + +func (p *PipelineFactory) pipelineJobName() string { + name := fmt.Sprintf("kratix-%s", p.Promise.GetName()) + + if p.ResourceWorkflow { + name = fmt.Sprintf("%s-%s", name, p.ResourceRequest.GetName()) + } + + name = fmt.Sprintf("%s-%s", name, p.Pipeline.GetName()) + + return objectutil.GenerateObjectName(name) +} + +func (p *PipelineFactory) pipelineJobLabels(requestSHA string) map[string]string { + ls := labels.Merge( + PromiseLabels(p.Promise), + WorkflowLabels(p.WorkflowType, p.WorkflowAction, p.Pipeline.GetName()), + ) + if p.ResourceWorkflow { + ls = labels.Merge(ls, ResourceLabels(p.ResourceRequest)) + } + if requestSHA != "" { + ls[KratixResourceHashLabel] = requestSHA + } + + return ls +} + +func (p *PipelineFactory) getObjAndHash() (*unstructured.Unstructured, string, error) { + uPromise, err := p.Promise.ToUnstructured() + if err != nil { + return nil, "", err + } + + promiseHash, err := hash.ComputeHashForResource(uPromise) + if err != nil { + return nil, "", err + } + + if !p.ResourceWorkflow { + return uPromise, promiseHash, nil + } + + resourceHash, err := hash.ComputeHashForResource(p.ResourceRequest) + if err != nil { + return nil, "", err + } + + return p.ResourceRequest, hash.ComputeHash(fmt.Sprintf("%s-%s", promiseHash, resourceHash)), nil +} + +func (p *PipelineFactory) role() (*rbacv1.Role, error) { + crd, err := p.Promise.GetAPIAsCRD() + if err != nil { + return nil, err + } + plural := crd.Spec.Names.Plural + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.ID, + Labels: PromiseLabels(p.Promise), + Namespace: p.Namespace, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{crd.Spec.Group}, + Resources: []string{plural, plural + "/status"}, + Verbs: []string{"get", "list", "update", "create", "patch"}, + }, + { + APIGroups: []string{GroupVersion.Group}, + Resources: []string{"works"}, + Verbs: []string{"*"}, + }, + }, + }, 
nil +} + +func (p *PipelineFactory) roleBinding(roleName string, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.ID, + Labels: PromiseLabels(p.Promise), + Namespace: p.Namespace, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + APIGroup: rbacv1.GroupName, + Name: roleName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: serviceAccount.GetName(), + Namespace: serviceAccount.GetNamespace(), + }, + }, + } +} + +func (p *PipelineFactory) clusterRole() *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.ID, + Labels: PromiseLabels(p.Promise), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{GroupVersion.Group}, + Resources: []string{PromisePlural, PromisePlural + "/status", "works"}, + Verbs: []string{"get", "list", "update", "create", "patch"}, + }, + }, + } +} + +func (p *PipelineFactory) clusterRoleBinding(clusterRoleName string, serviceAccount *corev1.ServiceAccount) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.ID, + Labels: PromiseLabels(p.Promise), + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + APIGroup: rbacv1.GroupName, + Name: clusterRoleName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Namespace: serviceAccount.GetNamespace(), + Name: serviceAccount.GetName(), + }, + }, + } +} + +func PromiseLabels(promise *Promise) map[string]string { + return map[string]string{ + PromiseNameLabel: promise.GetName(), + } +} + +func ResourceLabels(request *unstructured.Unstructured) map[string]string { + return map[string]string{ + ResourceNameLabel: request.GetName(), + } +} + +func WorkflowLabels(workflowType Type, workflowAction Action, pipelineName string) map[string]string { + ls := map[string]string{} + + if workflowType != "" { + ls = labels.Merge(ls, map[string]string{ + WorkTypeLabel: string(workflowType), + }) + } + + if pipelineName != "" { + ls = labels.Merge(ls, map[string]string{ + PipelineNameLabel: pipelineName, + }) + } + + if workflowAction != "" { + ls = labels.Merge(ls, map[string]string{ + WorkActionLabel: string(workflowAction), + }) + } + return ls +} diff --git a/api/v1alpha1/pipeline_types_test.go b/api/v1alpha1/pipeline_types_test.go new file mode 100644 index 00000000..3233e67d --- /dev/null +++ b/api/v1alpha1/pipeline_types_test.go @@ -0,0 +1,760 @@ +package v1alpha1_test + +import ( + "encoding/json" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + "github.com/syntasso/kratix/api/v1alpha1" + "github.com/syntasso/kratix/lib/hash" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "strings" +) + +var _ = Describe("Pipeline", func() { + var ( + pipeline *v1alpha1.Pipeline + promise *v1alpha1.Promise + promiseCrd *apiextensionsv1.CustomResourceDefinition + resourceRequest *unstructured.Unstructured + ) + + BeforeEach(func() { + secretRef := &corev1.SecretEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "secretName"}} + + pipeline = &v1alpha1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pipelineName", + }, + Spec: v1alpha1.PipelineSpec{ + Containers: []v1alpha1.Container{ + { + Name: "container-0", + Image: "container-0-image", + Args: []string{"arg1", "arg2"}, + Command: []string{"command1", "command2"}, + Env: []corev1.EnvVar{{Name: "env1", Value: "value1"}}, + EnvFrom: []corev1.EnvFromSource{{Prefix: "prefix1", SecretRef: secretRef}}, + VolumeMounts: []corev1.VolumeMount{{Name: "customVolume", MountPath: "/mount/path"}}, + ImagePullPolicy: "Always", + }, + {Name: "container-1", Image: "container-1-image"}, + }, + Volumes: []corev1.Volume{{Name: "customVolume", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: "imagePullSecret"}}, + }, + } + promiseCrd = &apiextensionsv1.CustomResourceDefinition{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "promise.crd.group", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "promiseCrdPlural", + }, + }, + } + + rawCrd, err := json.Marshal(promiseCrd) + Expect(err).ToNot(HaveOccurred()) + promise = &v1alpha1.Promise{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "fake.promise.group/v1", + Kind: "promisekind", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "promiseName", + }, + Spec: v1alpha1.PromiseSpec{ + DestinationSelectors: []v1alpha1.PromiseScheduling{ + {MatchLabels: map[string]string{"label": "value"}}, + {MatchLabels: map[string]string{"another-label": "another-value"}}, + }, + API: &runtime.RawExtension{Raw: rawCrd}, + }, + } + + resourceRequest = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "fake.resource.group/v1", + "kind": "promisekind", + "metadata": map[string]interface{}{ + "name": "resourceName", + }, + }, + } + }) + + Describe("Pipeline Factory Constructors", func() { + Describe("ForPromise", func() { + It("sets the appropriate fields", func() { + f := pipeline.ForPromise(promise, v1alpha1.WorkflowActionConfigure) + Expect(f).ToNot(BeNil()) + Expect(f.ID).To(Equal(promise.GetName() + "-promise-pipeline")) + Expect(f.Promise).To(Equal(promise)) + Expect(f.ResourceRequest).To(BeNil()) + Expect(f.Pipeline).To(Equal(pipeline)) + Expect(f.Namespace).To(Equal(v1alpha1.SystemNamespace)) + Expect(f.WorkflowAction).To(Equal(v1alpha1.WorkflowActionConfigure)) + Expect(f.WorkflowType).To(Equal(v1alpha1.WorkflowTypePromise)) + Expect(f.ResourceWorkflow).To(BeFalse()) + }) + }) + + Describe("ForResource", func() { + It("sets the appropriate fields", func() { + f := pipeline.ForResource(promise, v1alpha1.WorkflowActionConfigure, resourceRequest) + Expect(f).ToNot(BeNil()) + Expect(f.ID).To(Equal(promise.GetName() + "-resource-pipeline")) + 
Expect(f.Promise).To(Equal(promise)) + Expect(f.ResourceRequest).To(Equal(resourceRequest)) + Expect(f.Pipeline).To(Equal(pipeline)) + Expect(f.Namespace).To(Equal(resourceRequest.GetNamespace())) + Expect(f.WorkflowAction).To(Equal(v1alpha1.WorkflowActionConfigure)) + Expect(f.WorkflowType).To(Equal(v1alpha1.WorkflowTypeResource)) + Expect(f.ResourceWorkflow).To(BeTrue()) + }) + }) + }) + + Describe("PipelineFactory", func() { + var ( + factory *v1alpha1.PipelineFactory + ) + + BeforeEach(func() { + factory = &v1alpha1.PipelineFactory{ + ID: "factoryID", + Namespace: "factoryNamespace", + Promise: promise, + WorkflowAction: "fakeAction", + WorkflowType: "fakeType", + ResourceRequest: resourceRequest, + Pipeline: pipeline, + } + }) + + Describe("Resources", func() { + When("building resources for the configure action", func() { + It("should return a list of resources", func() { + factory.WorkflowAction = v1alpha1.WorkflowActionConfigure + env := []corev1.EnvVar{{Name: "env1", Value: "value1"}} + role, err := factory.ObjectRole() + Expect(err).ToNot(HaveOccurred()) + serviceAccount := factory.ServiceAccount() + configMap, err := factory.ConfigMap(promise.GetWorkloadGroupScheduling()) + Expect(err).ToNot(HaveOccurred()) + job, err := factory.PipelineJob(configMap, serviceAccount, env) + Expect(err).ToNot(HaveOccurred()) + + resources, err := factory.Resources(env) + Expect(err).ToNot(HaveOccurred()) + Expect(resources.Name).To(Equal(pipeline.GetName())) + Expect(resources.RequiredResources).To(HaveLen(4)) + Expect(resources.RequiredResources).To(ConsistOf( + serviceAccount, role, factory.ObjectRoleBinding(role.GetName(), serviceAccount), configMap, + )) + Expect(resources.Job.Name).To(HavePrefix("kratix-%s-%s", promise.GetName(), pipeline.GetName())) + job.Name = resources.Job.Name + Expect(resources.Job).To(Equal(job)) + }) + }) + + When("building resources for the delete action", func() { + It("should return a list of resources", func() { + factory.WorkflowAction = v1alpha1.WorkflowActionDelete + env := []corev1.EnvVar{{Name: "env1", Value: "value1"}} + role, err := factory.ObjectRole() + Expect(err).ToNot(HaveOccurred()) + serviceAccount := factory.ServiceAccount() + configMap, err := factory.ConfigMap(promise.GetWorkloadGroupScheduling()) + Expect(err).ToNot(HaveOccurred()) + job, err := factory.PipelineJob(configMap, serviceAccount, env) + Expect(err).ToNot(HaveOccurred()) + + resources, err := factory.Resources(env) + Expect(err).ToNot(HaveOccurred()) + Expect(resources.Name).To(Equal(pipeline.GetName())) + Expect(resources.RequiredResources).To(HaveLen(3)) + Expect(resources.RequiredResources).To(ConsistOf( + serviceAccount, role, factory.ObjectRoleBinding(role.GetName(), serviceAccount), + )) + + Expect(resources.Job.Name).To(HavePrefix("kratix-%s-%s", promise.GetName(), pipeline.GetName())) + job.Name = resources.Job.Name + Expect(resources.Job).To(Equal(job)) + }) + }) + }) + + Describe("ServiceAccount", func() { + It("should return a service account", func() { + sa := factory.ServiceAccount() + Expect(sa).ToNot(BeNil()) + Expect(sa.GetName()).To(Equal(factory.ID)) + Expect(sa.GetNamespace()).To(Equal(factory.Namespace)) + Expect(sa.GetLabels()).To(HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName())) + }) + }) + + Describe("ObjectRole", func() { + When("building a role for a promise pipeline", func() { + It("returns a cluster role", func() { + objectRole, err := factory.ObjectRole() + Expect(err).ToNot(HaveOccurred()) + Expect(objectRole).ToNot(BeNil()) + 
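+				// promise workflows are not namespaced, so ObjectRole is expected to return a ClusterRole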
Expect(objectRole).To(BeAssignableToTypeOf(&rbacv1.ClusterRole{})) + + clusterRole := objectRole.(*rbacv1.ClusterRole) + Expect(clusterRole.GetName()).To(Equal(factory.ID)) + Expect(clusterRole.GetLabels()).To(HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName())) + + Expect(clusterRole.Rules).To(ConsistOf(rbacv1.PolicyRule{ + APIGroups: []string{v1alpha1.GroupVersion.Group}, + Resources: []string{v1alpha1.PromisePlural, v1alpha1.PromisePlural + "/status", "works"}, + Verbs: []string{"get", "list", "update", "create", "patch"}, + })) + }) + }) + + When("building a role for a resource pipeline", func() { + It("returns a role", func() { + factory.ResourceWorkflow = true + + objectRole, err := factory.ObjectRole() + Expect(err).ToNot(HaveOccurred()) + Expect(objectRole).ToNot(BeNil()) + Expect(objectRole).To(BeAssignableToTypeOf(&rbacv1.Role{})) + + role := objectRole.(*rbacv1.Role) + Expect(role.GetName()).To(Equal(factory.ID)) + Expect(role.GetNamespace()).To(Equal(factory.Namespace)) + Expect(role.GetLabels()).To(HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName())) + + Expect(role.Rules).To(ConsistOf(rbacv1.PolicyRule{ + APIGroups: []string{promiseCrd.Spec.Group}, + Resources: []string{promiseCrd.Spec.Names.Plural, promiseCrd.Spec.Names.Plural + "/status"}, + Verbs: []string{"get", "list", "update", "create", "patch"}, + }, rbacv1.PolicyRule{ + APIGroups: []string{v1alpha1.GroupVersion.Group}, + Resources: []string{"works"}, + Verbs: []string{"*"}, + })) + }) + }) + }) + + Describe("ObjectRoleBinding", func() { + var serviceAccount *corev1.ServiceAccount + BeforeEach(func() { + serviceAccount = &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "serviceAccountName", + Namespace: "serviceAccountNamespace", + }, + } + }) + + When("building a role binding for a promise pipeline", func() { + It("returns a cluster role binding", func() { + objectRoleBinding := factory.ObjectRoleBinding("aClusterRole", serviceAccount) + Expect(objectRoleBinding).ToNot(BeNil()) + Expect(objectRoleBinding).To(BeAssignableToTypeOf(&rbacv1.ClusterRoleBinding{})) + + clusterRoleBinding := objectRoleBinding.(*rbacv1.ClusterRoleBinding) + Expect(clusterRoleBinding.GetName()).To(Equal(factory.ID)) + Expect(clusterRoleBinding.GetLabels()).To(HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName())) + + Expect(clusterRoleBinding.RoleRef).To(Equal(rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "aClusterRole", + })) + + Expect(clusterRoleBinding.Subjects).To(ConsistOf(rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: serviceAccount.GetNamespace(), + Name: serviceAccount.GetName(), + })) + }) + }) + + When("building a role for a resource pipeline", func() { + It("returns a role", func() { + factory.ResourceWorkflow = true + + objectRoleBinding := factory.ObjectRoleBinding("aNamespacedRole", serviceAccount) + Expect(objectRoleBinding).ToNot(BeNil()) + Expect(objectRoleBinding).To(BeAssignableToTypeOf(&rbacv1.RoleBinding{})) + + roleBinding := objectRoleBinding.(*rbacv1.RoleBinding) + Expect(roleBinding.GetName()).To(Equal(factory.ID)) + Expect(roleBinding.GetNamespace()).To(Equal(factory.Namespace)) + Expect(roleBinding.GetLabels()).To(HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName())) + + Expect(roleBinding.RoleRef).To(Equal(rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: "aNamespacedRole", + })) + Expect(roleBinding.Subjects).To(ConsistOf(rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Namespace: 
serviceAccount.GetNamespace(), + Name: serviceAccount.GetName(), + })) + }) + }) + }) + + Describe("ConfigMap", func() { + It("should return a config map", func() { + workloadGroupScheduling := []v1alpha1.WorkloadGroupScheduling{ + {MatchLabels: map[string]string{"label": "value"}, Source: "promise"}, + {MatchLabels: map[string]string{"another-label": "another-value"}, Source: "resource"}, + } + cm, err := factory.ConfigMap(workloadGroupScheduling) + Expect(err).ToNot(HaveOccurred()) + Expect(cm).ToNot(BeNil()) + Expect(cm.GetName()).To(Equal("destination-selectors-" + factory.Promise.GetName())) + Expect(cm.GetNamespace()).To(Equal(factory.Namespace)) + Expect(cm.GetLabels()).To(HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName())) + Expect(cm.Data).To(HaveKeyWithValue("destinationSelectors", "- matchlabels:\n label: value\n source: promise\n- matchlabels:\n another-label: another-value\n source: resource\n")) + }) + }) + + Describe("DefaultVolumes", func() { + It("should return a list of default volumes", func() { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "aConfigMap", + }, + } + volumes := factory.DefaultVolumes(configMap) + Expect(volumes).To(HaveLen(1)) + Expect(volumes).To(ConsistOf(corev1.Volume{ + Name: "promise-scheduling", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: configMap.GetName()}, + Items: []corev1.KeyToPath{ + {Key: "destinationSelectors", Path: "promise-scheduling"}, + }, + }, + }, + })) + }) + }) + + Describe("DefaultPipelineVolumes", func() { + It("should return a list of default pipeline volumes", func() { + volumes, volumeMounts := factory.DefaultPipelineVolumes() + Expect(volumes).To(HaveLen(3)) + Expect(volumeMounts).To(HaveLen(3)) + emptyDir := corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}} + Expect(volumes).To(ConsistOf( + corev1.Volume{Name: "shared-input", VolumeSource: emptyDir}, + corev1.Volume{Name: "shared-output", VolumeSource: emptyDir}, + corev1.Volume{Name: "shared-metadata", VolumeSource: emptyDir}, + )) + Expect(volumeMounts).To(ConsistOf( + corev1.VolumeMount{Name: "shared-input", MountPath: "/kratix/input", ReadOnly: true}, + corev1.VolumeMount{Name: "shared-output", MountPath: "/kratix/output"}, + corev1.VolumeMount{Name: "shared-metadata", MountPath: "/kratix/metadata"}, + )) + }) + }) + + Describe("DefaultEnvVars", func() { + It("should return a list of default environment variables", func() { + envVars := factory.DefaultEnvVars() + Expect(envVars).To(HaveLen(3)) + Expect(envVars).To(ConsistOf( + corev1.EnvVar{Name: "KRATIX_WORKFLOW_ACTION", Value: "fakeAction"}, + corev1.EnvVar{Name: "KRATIX_WORKFLOW_TYPE", Value: "fakeType"}, + corev1.EnvVar{Name: "KRATIX_PROMISE_NAME", Value: promise.GetName()}, + )) + }) + }) + + Describe("ReaderContainer", func() { + When("building the reader container for a promise pipeline", func() { + It("returns a the reader container with the promise information", func() { + container := factory.ReaderContainer() + Expect(container).ToNot(BeNil()) + Expect(container.Name).To(Equal("reader")) + Expect(container.Command).To(Equal([]string{"sh", "-c", "reader"})) + Expect(container.Image).To(Equal(workCreatorImage)) + Expect(container.Env).To(ConsistOf( + corev1.EnvVar{Name: "OBJECT_KIND", Value: promise.GroupVersionKind().Kind}, + corev1.EnvVar{Name: "OBJECT_GROUP", Value: promise.GroupVersionKind().Group}, + corev1.EnvVar{Name: "OBJECT_NAME", Value: promise.GetName()}, + 
corev1.EnvVar{Name: "OBJECT_NAMESPACE", Value: factory.Namespace}, + corev1.EnvVar{Name: "KRATIX_WORKFLOW_TYPE", Value: string(factory.WorkflowType)}, + )) + Expect(container.VolumeMounts).To(ConsistOf( + corev1.VolumeMount{Name: "shared-input", MountPath: "/kratix/input"}, + corev1.VolumeMount{Name: "shared-output", MountPath: "/kratix/output"}, + )) + }) + }) + + When("building the reader container for a resource pipeline", func() { + It("returns a the reader container with the resource information", func() { + factory.ResourceWorkflow = true + container := factory.ReaderContainer() + Expect(container).ToNot(BeNil()) + Expect(container.Name).To(Equal("reader")) + Expect(container.Image).To(Equal(workCreatorImage)) + Expect(container.Env).To(ConsistOf( + corev1.EnvVar{Name: "OBJECT_KIND", Value: resourceRequest.GroupVersionKind().Kind}, + corev1.EnvVar{Name: "OBJECT_GROUP", Value: resourceRequest.GroupVersionKind().Group}, + corev1.EnvVar{Name: "OBJECT_NAME", Value: resourceRequest.GetName()}, + corev1.EnvVar{Name: "OBJECT_NAMESPACE", Value: factory.Namespace}, + corev1.EnvVar{Name: "KRATIX_WORKFLOW_TYPE", Value: string(factory.WorkflowType)}, + )) + Expect(container.VolumeMounts).To(ConsistOf( + corev1.VolumeMount{Name: "shared-input", MountPath: "/kratix/input"}, + corev1.VolumeMount{Name: "shared-output", MountPath: "/kratix/output"}, + )) + }) + }) + }) + + Describe("WorkCreatorContainer", func() { + When("building the work creator container for a promise pipeline", func() { + It("returns a the work creator container with the appropriate command", func() { + expectedFlags := strings.Join([]string{ + "-input-directory", "/work-creator-files", + "-promise-name", promise.GetName(), + "-pipeline-name", pipeline.GetName(), + "-namespace", factory.Namespace, + "-workflow-type", string(factory.WorkflowType), + }, " ") + container := factory.WorkCreatorContainer() + Expect(container).ToNot(BeNil()) + Expect(container.Name).To(Equal("work-writer")) + Expect(container.Image).To(Equal(workCreatorImage)) + Expect(container.Command).To(Equal([]string{"sh", "-c", "./work-creator " + expectedFlags})) + Expect(container.VolumeMounts).To(ConsistOf( + corev1.VolumeMount{Name: "shared-output", MountPath: "/work-creator-files/input"}, + corev1.VolumeMount{Name: "shared-metadata", MountPath: "/work-creator-files/metadata"}, + corev1.VolumeMount{Name: "promise-scheduling", MountPath: "/work-creator-files/kratix-system"}, + )) + + }) + }) + When("building the work creator container for a resource pipeline", func() { + It("returns a the work creator container with the appropriate command", func() { + factory.ResourceWorkflow = true + + expectedFlags := strings.Join([]string{ + "-input-directory", "/work-creator-files", + "-promise-name", promise.GetName(), + "-pipeline-name", pipeline.GetName(), + "-namespace", factory.Namespace, + "-workflow-type", string(factory.WorkflowType), + "-resource-name", resourceRequest.GetName(), + }, " ") + container := factory.WorkCreatorContainer() + Expect(container).ToNot(BeNil()) + Expect(container.Name).To(Equal("work-writer")) + Expect(container.Image).To(Equal(workCreatorImage)) + Expect(container.Command).To(Equal([]string{"sh", "-c", "./work-creator " + expectedFlags})) + Expect(container.VolumeMounts).To(ConsistOf( + corev1.VolumeMount{Name: "shared-output", MountPath: "/work-creator-files/input"}, + corev1.VolumeMount{Name: "shared-metadata", MountPath: "/work-creator-files/metadata"}, + corev1.VolumeMount{Name: "promise-scheduling", MountPath: 
"/work-creator-files/kratix-system"}, + )) + }) + }) + }) + + Describe("PipelineContainers", func() { + var defaultEnvVars []corev1.EnvVar + var defaultVolumes []corev1.Volume + var defaultVolumeMounts []corev1.VolumeMount + + BeforeEach(func() { + defaultEnvVars = factory.DefaultEnvVars() + defaultVolumes, defaultVolumeMounts = factory.DefaultPipelineVolumes() + }) + It("returns the pipeline containers and volumes", func() { + containers, volumes := factory.PipelineContainers() + Expect(containers).To(HaveLen(2)) + Expect(volumes).To(HaveLen(4)) + + expectedContainer0 := pipeline.Spec.Containers[0] + Expect(containers[0]).To(MatchFields(IgnoreExtras, Fields{ + "Name": Equal(expectedContainer0.Name), + "Image": Equal(expectedContainer0.Image), + "Args": Equal(expectedContainer0.Args), + "Command": Equal(expectedContainer0.Command), + "Env": Equal(append(defaultEnvVars, expectedContainer0.Env...)), + "EnvFrom": Equal(expectedContainer0.EnvFrom), + "VolumeMounts": Equal(append(defaultVolumeMounts, expectedContainer0.VolumeMounts...)), + "ImagePullPolicy": Equal(expectedContainer0.ImagePullPolicy), + })) + Expect(volumes).To(Equal(append(defaultVolumes, pipeline.Spec.Volumes...))) + + expectedContainer1 := pipeline.Spec.Containers[1] + Expect(containers[1]).To(MatchFields(IgnoreExtras, Fields{ + "Name": Equal(expectedContainer1.Name), + "Image": Equal(expectedContainer1.Image), + "Args": BeNil(), + "Command": BeNil(), + "Env": Equal(defaultEnvVars), + "EnvFrom": BeNil(), + "VolumeMounts": Equal(defaultVolumeMounts), + "ImagePullPolicy": BeEmpty(), + })) + }) + }) + + Describe("StatusWriterContainer", func() { + var obj *unstructured.Unstructured + var envVars []corev1.EnvVar + BeforeEach(func() { + obj = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "some.api.group/someVersion", + "kind": "somekind", + "metadata": map[string]interface{}{ + "name": "someName", + "namespace": "someNamespace", + }, + }, + } + + envVars = []corev1.EnvVar{ + {Name: "env1", Value: "value1"}, + {Name: "env2", Value: "value2"}, + } + }) + + It("returns the appropriate container", func() { + container := factory.StatusWriterContainer(obj, envVars) + + Expect(container).ToNot(BeNil()) + Expect(container.Name).To(Equal("status-writer")) + Expect(container.Image).To(Equal(workCreatorImage)) + Expect(container.Command).To(Equal([]string{"sh", "-c", "update-status"})) + Expect(container.Env).To(ConsistOf( + corev1.EnvVar{Name: "OBJECT_KIND", Value: obj.GroupVersionKind().Kind}, + corev1.EnvVar{Name: "OBJECT_GROUP", Value: obj.GroupVersionKind().Group}, + corev1.EnvVar{Name: "OBJECT_NAME", Value: obj.GetName()}, + corev1.EnvVar{Name: "OBJECT_NAMESPACE", Value: factory.Namespace}, + corev1.EnvVar{Name: "env1", Value: "value1"}, + corev1.EnvVar{Name: "env2", Value: "value2"}, + )) + Expect(container.VolumeMounts).To(ConsistOf( + corev1.VolumeMount{Name: "shared-metadata", MountPath: "/work-creator-files/metadata"}, + )) + }) + }) + + Describe("PipelineJob", func() { + var ( + serviceAccount *corev1.ServiceAccount + configMap *corev1.ConfigMap + envVars []corev1.EnvVar + ) + BeforeEach(func() { + var err error + serviceAccount = factory.ServiceAccount() + configMap, err = factory.ConfigMap(promise.GetWorkloadGroupScheduling()) + Expect(err).ToNot(HaveOccurred()) + envVars = []corev1.EnvVar{ + {Name: "env1", Value: "value1"}, + {Name: "env2", Value: "value2"}, + } + }) + + When("building a job for a promise pipeline", func() { + When("building a job for the configure action", func() { + 
It("returns a job with the appropriate spec", func() { + job, err := factory.PipelineJob(configMap, serviceAccount, envVars) + Expect(job).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + + Expect(job.GetName()).To(HavePrefix("kratix-%s-%s", promise.GetName(), pipeline.GetName())) + Expect(job.GetNamespace()).To(Equal(factory.Namespace)) + for _, definedLabels := range []map[string]string{job.GetLabels(), job.Spec.Template.GetLabels()} { + Expect(definedLabels).To(SatisfyAll( + HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName()), + HaveKeyWithValue(v1alpha1.WorkTypeLabel, string(factory.WorkflowType)), + HaveKeyWithValue(v1alpha1.WorkActionLabel, string(factory.WorkflowAction)), + HaveKeyWithValue(v1alpha1.PipelineNameLabel, pipeline.GetName()), + HaveKeyWithValue(v1alpha1.KratixResourceHashLabel, promiseHash(promise)), + Not(HaveKey(v1alpha1.ResourceNameLabel)), + )) + } + podSpec := job.Spec.Template.Spec + Expect(podSpec.ServiceAccountName).To(Equal(serviceAccount.GetName())) + Expect(podSpec.ImagePullSecrets).To(ConsistOf(pipeline.Spec.ImagePullSecrets)) + Expect(podSpec.InitContainers).To(HaveLen(4)) + var initContainerNames []string + var initContainerImages []string + for _, container := range podSpec.InitContainers { + initContainerNames = append(initContainerNames, container.Name) + initContainerImages = append(initContainerImages, container.Image) + } + Expect(initContainerNames).To(Equal([]string{ + "reader", + pipeline.Spec.Containers[0].Name, + pipeline.Spec.Containers[1].Name, + "work-writer", + })) + Expect(initContainerImages).To(Equal([]string{ + workCreatorImage, + pipeline.Spec.Containers[0].Image, + pipeline.Spec.Containers[1].Image, + workCreatorImage, + })) + Expect(podSpec.Containers).To(HaveLen(1)) + Expect(podSpec.Containers[0].Name).To(Equal("status-writer")) + Expect(podSpec.RestartPolicy).To(Equal(corev1.RestartPolicyOnFailure)) + Expect(podSpec.Volumes).To(HaveLen(5)) + var volumeNames []string + for _, volume := range podSpec.Volumes { + volumeNames = append(volumeNames, volume.Name) + } + Expect(volumeNames).To(ConsistOf( + "promise-scheduling", + "shared-input", "shared-output", "shared-metadata", + pipeline.Spec.Volumes[0].Name, + )) + }) + }) + + When("building a job for the delete action", func() { + BeforeEach(func() { + factory.WorkflowAction = v1alpha1.WorkflowActionDelete + }) + + It("returns a job with the appropriate spec", func() { + job, err := factory.PipelineJob(configMap, serviceAccount, envVars) + Expect(job).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + + podSpec := job.Spec.Template.Spec + Expect(podSpec.InitContainers).To(HaveLen(2)) + var initContainerNames []string + for _, container := range podSpec.InitContainers { + initContainerNames = append(initContainerNames, container.Name) + } + Expect(initContainerNames).To(Equal([]string{"reader", pipeline.Spec.Containers[0].Name})) + Expect(podSpec.Containers).To(HaveLen(1)) + Expect(podSpec.Containers[0].Name).To(Equal(pipeline.Spec.Containers[1].Name)) + Expect(podSpec.Containers[0].Image).To(Equal(pipeline.Spec.Containers[1].Image)) + }) + }) + }) + + When("building a job for a resource pipeline", func() { + BeforeEach(func() { + factory.ResourceWorkflow = true + }) + + When("building a job for the configure action", func() { + It("returns a job with the appropriate spec", func() { + job, err := factory.PipelineJob(configMap, serviceAccount, envVars) + Expect(job).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + + 
Expect(job.GetName()).To(HavePrefix("kratix-%s-%s-%s", promise.GetName(), resourceRequest.GetName(), pipeline.GetName())) + Expect(job.GetNamespace()).To(Equal(factory.Namespace)) + for _, definedLabels := range []map[string]string{job.GetLabels(), job.Spec.Template.GetLabels()} { + Expect(definedLabels).To(SatisfyAll( + HaveKeyWithValue(v1alpha1.PromiseNameLabel, promise.GetName()), + HaveKeyWithValue(v1alpha1.WorkTypeLabel, string(factory.WorkflowType)), + HaveKeyWithValue(v1alpha1.WorkActionLabel, string(factory.WorkflowAction)), + HaveKeyWithValue(v1alpha1.PipelineNameLabel, pipeline.GetName()), + HaveKeyWithValue(v1alpha1.KratixResourceHashLabel, combinedHash(promiseHash(promise), resourceHash(resourceRequest))), + HaveKeyWithValue(v1alpha1.ResourceNameLabel, resourceRequest.GetName()), + )) + } + podSpec := job.Spec.Template.Spec + Expect(podSpec.ServiceAccountName).To(Equal(serviceAccount.GetName())) + Expect(podSpec.ImagePullSecrets).To(ConsistOf(pipeline.Spec.ImagePullSecrets)) + Expect(podSpec.InitContainers).To(HaveLen(4)) + var initContainerNames []string + var initContainerImages []string + for _, container := range podSpec.InitContainers { + initContainerNames = append(initContainerNames, container.Name) + initContainerImages = append(initContainerImages, container.Image) + } + Expect(initContainerNames).To(Equal([]string{ + "reader", + pipeline.Spec.Containers[0].Name, + pipeline.Spec.Containers[1].Name, + "work-writer", + })) + Expect(initContainerImages).To(Equal([]string{ + workCreatorImage, + pipeline.Spec.Containers[0].Image, + pipeline.Spec.Containers[1].Image, + workCreatorImage, + })) + Expect(podSpec.Containers).To(HaveLen(1)) + Expect(podSpec.Containers[0].Name).To(Equal("status-writer")) + Expect(podSpec.RestartPolicy).To(Equal(corev1.RestartPolicyOnFailure)) + Expect(podSpec.Volumes).To(HaveLen(5)) + var volumeNames []string + for _, volume := range podSpec.Volumes { + volumeNames = append(volumeNames, volume.Name) + } + Expect(volumeNames).To(ConsistOf( + "promise-scheduling", + "shared-input", "shared-output", "shared-metadata", + pipeline.Spec.Volumes[0].Name, + )) + }) + }) + + When("building a job for the delete action", func() { + BeforeEach(func() { + factory.WorkflowAction = v1alpha1.WorkflowActionDelete + }) + + It("returns a job with the appropriate spec", func() { + job, err := factory.PipelineJob(configMap, serviceAccount, envVars) + Expect(job).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + + podSpec := job.Spec.Template.Spec + Expect(podSpec.InitContainers).To(HaveLen(2)) + var initContainerNames []string + for _, container := range podSpec.InitContainers { + initContainerNames = append(initContainerNames, container.Name) + } + Expect(initContainerNames).To(Equal([]string{"reader", pipeline.Spec.Containers[0].Name})) + Expect(podSpec.Containers).To(HaveLen(1)) + Expect(podSpec.Containers[0].Name).To(Equal(pipeline.Spec.Containers[1].Name)) + Expect(podSpec.Containers[0].Image).To(Equal(pipeline.Spec.Containers[1].Image)) + }) + }) + }) + }) + }) +}) + +func promiseHash(promise *v1alpha1.Promise) string { + uPromise, err := promise.ToUnstructured() + Expect(err).ToNot(HaveOccurred()) + h, err := hash.ComputeHashForResource(uPromise) + Expect(err).ToNot(HaveOccurred()) + return h +} + +func resourceHash(resource *unstructured.Unstructured) string { + h, err := hash.ComputeHashForResource(resource) + Expect(err).ToNot(HaveOccurred()) + return h +} + +func combinedHash(hashes ...string) string { + return hash.ComputeHash(strings.Join(hashes, "-")) 
+} diff --git a/api/v1alpha1/promise_types.go b/api/v1alpha1/promise_types.go index 6c35a717..c4099ca6 100644 --- a/api/v1alpha1/promise_types.go +++ b/api/v1alpha1/promise_types.go @@ -21,10 +21,12 @@ import ( "encoding/json" "fmt" "io" + "strconv" "github.com/go-logr/logr" "gopkg.in/yaml.v2" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" @@ -151,13 +153,6 @@ type Promise struct { Status PromiseStatus `json:"status,omitempty"` } -type PromisePipelines struct { - DeleteResource []Pipeline - ConfigureResource []Pipeline - ConfigurePromise []Pipeline - DeletePromise []Pipeline -} - var ErrNoAPI = fmt.Errorf("promise does not contain an API") func SquashPromiseScheduling(scheduling []PromiseScheduling) map[string]string { @@ -195,12 +190,12 @@ func (p *Promise) DoesNotContainAPI() bool { return p.Spec.API == nil || p.Spec.API.Raw == nil } -func (p *Promise) GetAPIAsCRD() (*v1.CustomResourceDefinition, error) { +func (p *Promise) GetAPIAsCRD() (*apiextensionsv1.CustomResourceDefinition, error) { if p.DoesNotContainAPI() { return nil, ErrNoAPI } - crd := v1.CustomResourceDefinition{} + crd := apiextensionsv1.CustomResourceDefinition{} if err := json.Unmarshal(p.Spec.API.Raw, &crd); err != nil { return nil, fmt.Errorf("api is not a valid CRD: %w", err) } @@ -244,69 +239,6 @@ func (p *Promise) ToUnstructured() (*unstructured.Unstructured, error) { return unstructuredPromise, nil } -func (p *Promise) GeneratePipelines(logger logr.Logger) (PromisePipelines, error) { - pipelineWorkflows := [][]unstructured.Unstructured{ - p.Spec.Workflows.Resource.Configure, - p.Spec.Workflows.Resource.Delete, - p.Spec.Workflows.Promise.Configure, - p.Spec.Workflows.Promise.Delete, - } - - var pipelines [][]Pipeline - for _, pipeline := range pipelineWorkflows { - p, err := generatePipeline(pipeline, logger) - if err != nil { - return PromisePipelines{}, err - } - pipelines = append(pipelines, p) - } - - return PromisePipelines{ - ConfigureResource: pipelines[0], - DeleteResource: pipelines[1], - ConfigurePromise: pipelines[2], - DeletePromise: pipelines[3], - }, nil -} - -func generatePipeline(pipelines []unstructured.Unstructured, logger logr.Logger) ([]Pipeline, error) { - if len(pipelines) == 0 { - return nil, nil - } - - //We only support 1 pipeline for now - ps := []Pipeline{} - for _, pipeline := range pipelines { - pipelineLogger := logger.WithValues( - "pipelineKind", pipeline.GetKind(), - "pipelineVersion", pipeline.GetAPIVersion(), - "pipelineName", pipeline.GetName()) - - if pipeline.GetKind() == "Pipeline" && pipeline.GetAPIVersion() == "platform.kratix.io/v1alpha1" { - jsonPipeline, err := pipeline.MarshalJSON() - if err != nil { - // TODO test - pipelineLogger.Error(err, "Failed marshalling pipeline to json") - return nil, err - } - - p := Pipeline{} - err = json.Unmarshal(jsonPipeline, &p) - if err != nil { - // TODO test - pipelineLogger.Error(err, "Failed unmarshalling pipeline") - return nil, err - } - ps = append(ps, p) - } else { - return nil, fmt.Errorf("unsupported pipeline %q (%s.%s)", - pipeline.GetName(), pipeline.GetKind(), pipeline.GetAPIVersion()) - } - } - return ps, nil - -} - func (d Dependencies) Marshal() ([]byte, error) { buf := new(bytes.Buffer) encoder := yaml.NewEncoder(buf) @@ -332,3 +264,109 @@ type PromiseList struct { func 
init() { SchemeBuilder.Register(&Promise{}, &PromiseList{}) } + +func (p *Promise) GetWorkloadGroupScheduling() []WorkloadGroupScheduling { + workloadGroupScheduling := []WorkloadGroupScheduling{} + for _, scheduling := range p.Spec.DestinationSelectors { + workloadGroupScheduling = append(workloadGroupScheduling, WorkloadGroupScheduling{ + MatchLabels: scheduling.MatchLabels, + Source: "promise", + }) + } + + return workloadGroupScheduling +} + +func (p *Promise) generatePipelinesObjects(workflowType Type, workflowAction Action, crd *apiextensionsv1.CustomResourceDefinition, resourceRequest *unstructured.Unstructured, logger logr.Logger) ([]PipelineJobResources, error) { + promisePipelines, err := NewPipelinesMap(p, logger) + if err != nil { + return nil, err + } + + var allResources []PipelineJobResources + pipelines := promisePipelines[workflowType][workflowAction] + + lastIndex := len(pipelines) - 1 + for i, pipe := range pipelines { + isLast := i == lastIndex + additionalJobEnv := []corev1.EnvVar{ + {Name: "IS_LAST_PIPELINE", Value: strconv.FormatBool(isLast)}, + } + + var resources PipelineJobResources + var err error + switch workflowType { + case WorkflowTypeResource: + resources, err = pipe.ForResource(p, workflowAction, resourceRequest).Resources(additionalJobEnv) + case WorkflowTypePromise: + resources, err = pipe.ForPromise(p, workflowAction).Resources(additionalJobEnv) + } + if err != nil { + return nil, err + } + + allResources = append(allResources, resources) + } + return allResources, nil +} + +func (p *Promise) GeneratePromisePipelines(workflowAction Action, logger logr.Logger) ([]PipelineJobResources, error) { + return p.generatePipelinesObjects(WorkflowTypePromise, workflowAction, nil, nil, logger) +} + +func (p *Promise) GenerateResourcePipelines(workflowAction Action, crd *apiextensionsv1.CustomResourceDefinition, resourceRequest *unstructured.Unstructured, logger logr.Logger) ([]PipelineJobResources, error) { + return p.generatePipelinesObjects(WorkflowTypeResource, workflowAction, crd, resourceRequest, logger) +} + +func (p *Promise) HasPipeline(workflowType Type, workflowAction Action) bool { + switch workflowType { + case WorkflowTypeResource: + switch workflowAction { + case WorkflowActionConfigure: + return len(p.Spec.Workflows.Resource.Configure) > 0 + case WorkflowActionDelete: + return len(p.Spec.Workflows.Resource.Delete) > 0 + } + case WorkflowTypePromise: + switch workflowAction { + case WorkflowActionConfigure: + return len(p.Spec.Workflows.Promise.Configure) > 0 + case WorkflowActionDelete: + return len(p.Spec.Workflows.Promise.Delete) > 0 + } + } + return false +} + +type pipelineMap map[Type]map[Action][]Pipeline + +func NewPipelinesMap(promise *Promise, logger logr.Logger) (pipelineMap, error) { + unstructuredMap := map[Type]map[Action][]unstructured.Unstructured{ + WorkflowTypeResource: { + WorkflowActionConfigure: promise.Spec.Workflows.Resource.Configure, + WorkflowActionDelete: promise.Spec.Workflows.Resource.Delete, + }, + WorkflowTypePromise: { + WorkflowActionConfigure: promise.Spec.Workflows.Promise.Configure, + WorkflowActionDelete: promise.Spec.Workflows.Promise.Delete, + }, + } + + pipelinesMap := map[Type]map[Action][]Pipeline{} + + for _, t := range []Type{WorkflowTypeResource, WorkflowTypePromise} { + if _, ok := pipelinesMap[t]; !ok { + pipelinesMap[t] = map[Action][]Pipeline{} + } + for _, a := range []Action{WorkflowActionConfigure, WorkflowActionDelete} { + pipelines, err := PipelinesFromUnstructured(unstructuredMap[t][a], logger) + 
if err != nil { + return nil, err + } + pipelinesMap[t][a] = pipelines + } + + } + + return pipelinesMap, nil +} diff --git a/api/v1alpha1/v1alpha1_suite_test.go b/api/v1alpha1/v1alpha1_suite_test.go index 5421f67e..1853f0b7 100644 --- a/api/v1alpha1/v1alpha1_suite_test.go +++ b/api/v1alpha1/v1alpha1_suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha1_test import ( "testing" @@ -27,8 +27,11 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. +var workCreatorImage = "work-creator:latest" + func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) + t.Setenv("WC_IMG", workCreatorImage) RunSpecs(t, "v1alpha1 API Suite") } diff --git a/api/v1alpha1/work_types.go b/api/v1alpha1/work_types.go index 18beccae..71b6a243 100644 --- a/api/v1alpha1/work_types.go +++ b/api/v1alpha1/work_types.go @@ -30,6 +30,7 @@ const ( ResourceNameLabel = KratixPrefix + "resource-name" PipelineNameLabel = KratixPrefix + "pipeline-name" WorkTypeLabel = KratixPrefix + "work-type" + WorkActionLabel = KratixPrefix + "work-action" WorkTypePromise = "promise" WorkTypeResource = "resource" diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 7ded466e..8992e29c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -434,6 +434,35 @@ func (in *Pipeline) DeepCopy() *Pipeline { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineFactory) DeepCopyInto(out *PipelineFactory) { + *out = *in + if in.Promise != nil { + in, out := &in.Promise, &out.Promise + *out = new(Promise) + (*in).DeepCopyInto(*out) + } + if in.Pipeline != nil { + in, out := &in.Pipeline, &out.Pipeline + *out = new(Pipeline) + (*in).DeepCopyInto(*out) + } + if in.ResourceRequest != nil { + in, out := &in.ResourceRequest, &out.ResourceRequest + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineFactory. +func (in *PipelineFactory) DeepCopy() *PipelineFactory { + if in == nil { + return nil + } + out := new(PipelineFactory) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { *out = *in @@ -527,49 +556,6 @@ func (in *PromiseList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PromisePipelines) DeepCopyInto(out *PromisePipelines) { - *out = *in - if in.DeleteResource != nil { - in, out := &in.DeleteResource, &out.DeleteResource - *out = make([]Pipeline, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ConfigureResource != nil { - in, out := &in.ConfigureResource, &out.ConfigureResource - *out = make([]Pipeline, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ConfigurePromise != nil { - in, out := &in.ConfigurePromise, &out.ConfigurePromise - *out = make([]Pipeline, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DeletePromise != nil { - in, out := &in.DeletePromise, &out.DeletePromise - *out = make([]Pipeline, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromisePipelines. -func (in *PromisePipelines) DeepCopy() *PromisePipelines { - if in == nil { - return nil - } - out := new(PromisePipelines) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PromiseRelease) DeepCopyInto(out *PromiseRelease) { *out = *in diff --git a/controllers/dynamic_resource_request_controller.go b/controllers/dynamic_resource_request_controller.go index efdc67d9..e2f7daca 100644 --- a/controllers/dynamic_resource_request_controller.go +++ b/controllers/dynamic_resource_request_controller.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "strconv" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -27,12 +26,10 @@ import ( "github.com/go-logr/logr" "github.com/syntasso/kratix/api/v1alpha1" - "github.com/syntasso/kratix/lib/pipeline" "github.com/syntasso/kratix/lib/resourceutil" "github.com/syntasso/kratix/lib/workflow" batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -53,8 +50,6 @@ type DynamicResourceRequestController struct { GVK *schema.GroupVersionKind Scheme *runtime.Scheme PromiseIdentifier string - ConfigurePipelines []v1alpha1.Pipeline - DeletePipelines []v1alpha1.Pipeline Log logr.Logger UID string Enabled *bool @@ -89,11 +84,6 @@ func (r *DynamicResourceRequestController) Reconcile(ctx context.Context, req ct logger.Error(err, "Failed getting Promise") return ctrl.Result{}, err } - unstructuredPromise, err := promise.ToUnstructured() - if err != nil { - logger.Error(err, "Failed converting Promise to Unstructured") - return ctrl.Result{}, err - } if err := r.Client.Get(ctx, req.NamespacedName, rr); err != nil { if errors.IsNotFound(err) { @@ -124,7 +114,7 @@ func (r *DynamicResourceRequestController) Reconcile(ctx context.Context, req ct } if !rr.GetDeletionTimestamp().IsZero() { - return r.deleteResources(opts, rr, resourceRequestIdentifier) + return r.deleteResources(opts, promise, rr, resourceRequestIdentifier) } if !*r.CanCreateResources { @@ -148,39 +138,12 @@ func (r *DynamicResourceRequestController) Reconcile(ctx context.Context, req ct return addFinalizers(opts, rr, []string{workFinalizer, removeAllWorkflowJobsFinalizer, runDeleteWorkflowsFinalizer}) } - var pipelines []workflow.Pipeline - for i, p := range r.ConfigurePipelines { - pipelineResources, err := pipeline.NewConfigureResource( - rr, - unstructuredPromise, - 
r.CRD.Spec.Names.Plural, - p, - resourceRequestIdentifier, - r.PromiseIdentifier, - r.PromiseDestinationSelectors, - opts.logger, - ) - if err != nil { - return ctrl.Result{}, err - } - - isLast := i == len(r.ConfigurePipelines)-1 - job := pipelineResources[4].(*batchv1.Job) - job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{ - Name: "IS_LAST_PIPELINE", - Value: strconv.FormatBool(isLast), - }) - - //TODO smelly, refactor. Should we merge the lib/pipeline package with lib/workflow? - //TODO if we dont do that, backfil unit tests for dynamic and promise controllers to assert the job is correct - pipelines = append(pipelines, workflow.Pipeline{ - Job: job, - JobRequiredResources: pipelineResources[0:4], - Name: p.Name, - }) + pipelineResources, err := promise.GenerateResourcePipelines(v1alpha1.WorkflowActionConfigure, r.CRD, rr, logger) + if err != nil { + return ctrl.Result{}, err } - jobOpts := workflow.NewOpts(ctx, r.Client, logger, rr, pipelines, "resource") + jobOpts := workflow.NewOpts(ctx, r.Client, logger, rr, pipelineResources, "resource") requeue, err := reconcileConfigure(jobOpts) if err != nil { @@ -198,26 +161,18 @@ func (r *DynamicResourceRequestController) Reconcile(ctx context.Context, req ct return ctrl.Result{}, nil } -func (r *DynamicResourceRequestController) deleteResources(o opts, resourceRequest *unstructured.Unstructured, resourceRequestIdentifier string) (ctrl.Result, error) { +func (r *DynamicResourceRequestController) deleteResources(o opts, promise *v1alpha1.Promise, resourceRequest *unstructured.Unstructured, resourceRequestIdentifier string) (ctrl.Result, error) { if resourceutil.FinalizersAreDeleted(resourceRequest, rrFinalizers) { return ctrl.Result{}, nil } if controllerutil.ContainsFinalizer(resourceRequest, runDeleteWorkflowsFinalizer) { - var pipelines []workflow.Pipeline - for _, p := range r.DeletePipelines { - pipelineResources := pipeline.NewDeleteResource( - resourceRequest, p, resourceRequestIdentifier, r.PromiseIdentifier, r.CRD.Spec.Names.Plural, - ) - - pipelines = append(pipelines, workflow.Pipeline{ - Job: pipelineResources[3].(*batchv1.Job), - JobRequiredResources: pipelineResources[0:3], - Name: p.Name, - }) + pipelineResources, err := promise.GenerateResourcePipelines(v1alpha1.WorkflowActionDelete, r.CRD, resourceRequest, o.logger) + if err != nil { + return ctrl.Result{}, err } - jobOpts := workflow.NewOpts(o.ctx, o.client, o.logger, resourceRequest, pipelines, "resource") + jobOpts := workflow.NewOpts(o.ctx, o.client, o.logger, resourceRequest, pipelineResources, "resource") requeue, err := reconcileDelete(jobOpts) if err != nil { return ctrl.Result{}, err @@ -288,7 +243,11 @@ func (r *DynamicResourceRequestController) deleteWorkflows(o opts, resourceReque Kind: "Job", } - jobLabels := pipeline.LabelsForAllResourceWorkflows(resourceRequestIdentifier, r.PromiseIdentifier) + jobLabels := map[string]string{ + v1alpha1.PromiseNameLabel: r.PromiseIdentifier, + v1alpha1.ResourceNameLabel: resourceRequest.GetName(), + v1alpha1.WorkTypeLabel: v1alpha1.WorkTypeResource, + } resourcesRemaining, err := deleteAllResourcesWithKindMatchingLabel(o, jobGVK, jobLabels) if err != nil { diff --git a/controllers/dynamic_resource_request_controller_test.go b/controllers/dynamic_resource_request_controller_test.go index 13622845..875edb50 100644 --- a/controllers/dynamic_resource_request_controller_test.go +++ b/controllers/dynamic_resource_request_controller_test.go @@ -53,37 +53,12 @@ var _ = 
Describe("DynamicResourceRequestController", func() { enabled := true reconciler = &controllers.DynamicResourceRequestController{ - CanCreateResources: &enabled, - Client: fakeK8sClient, - Scheme: scheme.Scheme, - GVK: &rrGVK, - CRD: rrCRD, - PromiseIdentifier: promise.GetName(), - ConfigurePipelines: []v1alpha1.Pipeline{ - { - Spec: v1alpha1.PipelineSpec{ - Containers: []v1alpha1.Container{ - { - Name: "test", - Image: "configure:v0.1.0", - }, - }, - }, - }, - }, - DeletePipelines: []v1alpha1.Pipeline{ - { - Spec: v1alpha1.PipelineSpec{ - Containers: []v1alpha1.Container{ - { - Name: "test", - Image: "delete:v0.1.0", - }, - }, - }, - }, - }, - + CanCreateResources: &enabled, + Client: fakeK8sClient, + Scheme: scheme.Scheme, + GVK: &rrGVK, + CRD: rrCRD, + PromiseIdentifier: promise.GetName(), PromiseDestinationSelectors: promise.Spec.DestinationSelectors, // promiseWorkflowSelectors: work.GetDefaultScheduling("promise-workflow"), Log: l, @@ -130,7 +105,7 @@ var _ = Describe("DynamicResourceRequestController", func() { "kratix.io/promise-name": promise.GetName(), } - resources := reconcileConfigureOptsArg.Pipelines[0].JobRequiredResources + resources := reconcileConfigureOptsArg.Resources[0].RequiredResources By("creating a service account for pipeline", func() { Expect(resources[0]).To(BeAssignableToTypeOf(&v1.ServiceAccount{})) sa := resources[0].(*v1.ServiceAccount) diff --git a/controllers/promise_controller.go b/controllers/promise_controller.go index 8dc642f2..317e0665 100644 --- a/controllers/promise_controller.go +++ b/controllers/promise_controller.go @@ -19,8 +19,8 @@ package controllers import ( "context" "fmt" + "github.com/syntasso/kratix/lib/objectutil" "slices" - "strconv" "strings" "time" @@ -30,7 +30,6 @@ import ( "github.com/go-logr/logr" "github.com/syntasso/kratix/api/v1alpha1" - "github.com/syntasso/kratix/lib/pipeline" "github.com/syntasso/kratix/lib/resourceutil" "github.com/syntasso/kratix/lib/workflow" batchv1 "k8s.io/api/batch/v1" @@ -137,13 +136,8 @@ func (r *PromiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct logger: logger, } - pipelines, err := promise.GeneratePipelines(logger) - if err != nil { - return ctrl.Result{}, err - } - if !promise.DeletionTimestamp.IsZero() { - return r.deletePromise(opts, promise, pipelines.DeletePromise) + return r.deletePromise(opts, promise) } if value, found := promise.Labels[v1alpha1.PromiseVersionLabel]; found { @@ -162,7 +156,7 @@ func (r *PromiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct } //TODO handle removing finalizer - requeue, err := ensurePromiseDeleteWorkflowFinalizer(opts, promise, pipelines.DeletePromise) + requeue, err := ensurePromiseDeleteWorkflowFinalizer(opts, promise, promise.HasPipeline(v1alpha1.WorkflowTypePromise, v1alpha1.WorkflowActionDelete)) if err != nil { return ctrl.Result{}, err } @@ -208,7 +202,7 @@ func (r *PromiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct return addFinalizers(opts, promise, []string{dependenciesCleanupFinalizer}) } - requeue, err = r.reconcileDependencies(opts, promise, pipelines.ConfigurePromise) + requeue, err = r.reconcileDependencies(opts, promise) if err != nil { return ctrl.Result{}, err } @@ -226,7 +220,7 @@ func (r *PromiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct } } - err = r.ensureDynamicControllerIsStarted(promise, rrCRD, rrGVK, pipelines.ConfigureResource, pipelines.DeleteResource, &dynamicControllerCanCreateResources, logger) + err = r.ensureDynamicControllerIsStarted(promise, 
rrCRD, rrGVK, &dynamicControllerCanCreateResources, logger) if err != nil { return ctrl.Result{}, err } @@ -352,7 +346,7 @@ func (r *PromiseReconciler) generateStatusAndMarkRequirements(ctx context.Contex return promiseCondition, requirements } -func (r *PromiseReconciler) reconcileDependencies(o opts, promise *v1alpha1.Promise, configurePipeline []v1alpha1.Pipeline) (*ctrl.Result, error) { +func (r *PromiseReconciler) reconcileDependencies(o opts, promise *v1alpha1.Promise) (*ctrl.Result, error) { if len(promise.Spec.Dependencies) > 0 { o.logger.Info("Applying static dependencies for Promise", "promise", promise.GetName()) if err := r.applyWorkForStaticDependencies(o, promise); err != nil { @@ -368,7 +362,7 @@ func (r *PromiseReconciler) reconcileDependencies(o opts, promise *v1alpha1.Prom } } - if len(configurePipeline) == 0 { + if !promise.HasPipeline(v1alpha1.WorkflowTypePromise, v1alpha1.WorkflowActionConfigure) { return nil, nil } @@ -384,32 +378,12 @@ func (r *PromiseReconciler) reconcileDependencies(o opts, promise *v1alpha1.Prom return nil, err } - var pipelines []workflow.Pipeline - for i, p := range configurePipeline { - isLast := i == len(configurePipeline)-1 - pipelineResources, err := pipeline.NewConfigurePromise( - unstructuredPromise, - p, - promise.GetName(), - promise.Spec.DestinationSelectors, - o.logger, - ) - if err != nil { - return nil, err - } - job := pipelineResources[4].(*batchv1.Job) - job.Spec.Template.Spec.Containers[0].Env = append(job.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{ - Name: "IS_LAST_PIPELINE", - Value: strconv.FormatBool(isLast), - }) - pipelines = append(pipelines, workflow.Pipeline{ - Job: job, - JobRequiredResources: pipelineResources[0:4], - Name: p.Name, - }) + pipelineResources, err := promise.GeneratePromisePipelines(v1alpha1.WorkflowActionConfigure, o.logger) + if err != nil { + return nil, err } - jobOpts := workflow.NewOpts(o.ctx, o.client, o.logger, unstructuredPromise, pipelines, "promise") + jobOpts := workflow.NewOpts(o.ctx, o.client, o.logger, unstructuredPromise, pipelineResources, "promise") requeue, err := reconcileConfigure(jobOpts) if err != nil { @@ -446,15 +420,13 @@ func (r *PromiseReconciler) reconcileAllRRs(rrGVK schema.GroupVersionKind) error return nil } -func (r *PromiseReconciler) ensureDynamicControllerIsStarted(promise *v1alpha1.Promise, rrCRD *apiextensionsv1.CustomResourceDefinition, rrGVK schema.GroupVersionKind, configurePipelines, deletePipelines []v1alpha1.Pipeline, canCreateResources *bool, logger logr.Logger) error { +func (r *PromiseReconciler) ensureDynamicControllerIsStarted(promise *v1alpha1.Promise, rrCRD *apiextensionsv1.CustomResourceDefinition, rrGVK schema.GroupVersionKind, canCreateResources *bool, logger logr.Logger) error { // The Dynamic Controller needs to be started once and only once. 
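// A minimal sketch (assumed usage, not part of this changeset) of the new
// Promise-level workflow API that these controller hunks switch to. The
// signatures of workflow.NewOpts and the Generate*Pipelines methods are
// inferred from the call sites in this diff and may differ in the real
// codebase; the package and function names below are hypothetical.
package example

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/syntasso/kratix/api/v1alpha1"
	"github.com/syntasso/kratix/lib/workflow"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Promise-level configure workflow: the Promise now generates its own pipeline
// resources, so the controller no longer carries []v1alpha1.Pipeline fields.
func runPromiseConfigure(ctx context.Context, c client.Client, logger logr.Logger, promise *v1alpha1.Promise) error {
	if !promise.HasPipeline(v1alpha1.WorkflowTypePromise, v1alpha1.WorkflowActionConfigure) {
		return nil // nothing to do when no promise/configure pipelines are declared
	}

	unstructuredPromise, err := promise.ToUnstructured()
	if err != nil {
		return err
	}

	// Each returned element bundles the pipeline Job with the ServiceAccount,
	// (Cluster)Role, (Cluster)RoleBinding and scheduling ConfigMap it requires.
	resources, err := promise.GeneratePromisePipelines(v1alpha1.WorkflowActionConfigure, logger)
	if err != nil {
		return err
	}

	_ = workflow.NewOpts(ctx, c, logger, unstructuredPromise, resources, "promise")
	return nil // the controller would hand these opts to its reconcile helpers
}

// Resource-level counterpart, mirroring the dynamic resource request
// controller change earlier in this diff.
func runResourceConfigure(ctx context.Context, c client.Client, logger logr.Logger,
	promise *v1alpha1.Promise, crd *apiextensionsv1.CustomResourceDefinition, rr *unstructured.Unstructured) error {
	resources, err := promise.GenerateResourcePipelines(v1alpha1.WorkflowActionConfigure, crd, rr, logger)
	if err != nil {
		return err
	}

	_ = workflow.NewOpts(ctx, c, logger, rr, resources, "resource")
	return nil
}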
if r.dynamicControllerHasAlreadyStarted(promise) { logger.Info("dynamic controller already started, ensuring it is up to date") dynamicController := r.StartedDynamicControllers[string(promise.GetUID())] - dynamicController.DeletePipelines = deletePipelines - dynamicController.ConfigurePipelines = configurePipelines dynamicController.GVK = &rrGVK dynamicController.CRD = rrCRD @@ -475,8 +447,6 @@ func (r *PromiseReconciler) ensureDynamicControllerIsStarted(promise *v1alpha1.P GVK: &rrGVK, CRD: rrCRD, PromiseIdentifier: promise.GetName(), - ConfigurePipelines: configurePipelines, - DeletePipelines: deletePipelines, PromiseDestinationSelectors: promise.Spec.DestinationSelectors, Log: r.Log.WithName(promise.GetName()), UID: string(promise.GetUID())[0:5], @@ -645,7 +615,7 @@ func (r *PromiseReconciler) updateStatus(promise *v1alpha1.Promise, kind, group, return true, r.Client.Status().Update(context.TODO(), promise) } -func (r *PromiseReconciler) deletePromise(o opts, promise *v1alpha1.Promise, deletePipelines []v1alpha1.Pipeline) (ctrl.Result, error) { +func (r *PromiseReconciler) deletePromise(o opts, promise *v1alpha1.Promise) (ctrl.Result, error) { o.logger.Info("finalizers existing", "finalizers", promise.GetFinalizers()) if resourceutil.FinalizersAreDeleted(promise, promiseFinalizers) { return ctrl.Result{}, nil @@ -656,19 +626,10 @@ func (r *PromiseReconciler) deletePromise(o opts, promise *v1alpha1.Promise, del if err != nil { return ctrl.Result{}, err } - var pipelines []workflow.Pipeline - for _, p := range deletePipelines { - pipelineResources := pipeline.NewDeletePromise( - unstructuredPromise, p, - ) - - pipelines = append(pipelines, workflow.Pipeline{ - Job: pipelineResources[3].(*batchv1.Job), - JobRequiredResources: pipelineResources[0:3], - Name: p.Name, - }) + pipelines, err := promise.GeneratePromisePipelines(v1alpha1.WorkflowActionDelete, o.logger) + if err != nil { + return ctrl.Result{}, err } - jobOpts := workflow.NewOpts(o.ctx, o.client, o.logger, unstructuredPromise, pipelines, "promise") requeue, err := reconcileDelete(jobOpts) @@ -749,7 +710,10 @@ func (r *PromiseReconciler) deletePromiseWorkflowJobs(o opts, promise *v1alpha1. 
Kind: "Job", } - jobLabels := pipeline.LabelsForAllPromiseWorkflows(promise.GetName()) + jobLabels := map[string]string{ + v1alpha1.PromiseNameLabel: promise.GetName(), + v1alpha1.WorkTypeLabel: v1alpha1.WorkTypePromise, + } resourcesRemaining, err := deleteAllResourcesWithKindMatchingLabel(o, jobGVK, jobLabels) if err != nil { @@ -806,13 +770,8 @@ func (r *PromiseReconciler) deleteResourceRequests(o opts, promise *v1alpha1.Pro return err } - pipelines, err := promise.GeneratePipelines(o.logger) - if err != nil { - return err - } - var canCreateResources bool - err = r.ensureDynamicControllerIsStarted(promise, rrCRD, rrGVK, pipelines.ConfigureResource, pipelines.DeleteResource, &canCreateResources, o.logger) + err = r.ensureDynamicControllerIsStarted(promise, rrCRD, rrGVK, &canCreateResources, o.logger) if err != nil { return err } @@ -891,8 +850,7 @@ func (r *PromiseReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func ensurePromiseDeleteWorkflowFinalizer(o opts, promise *v1alpha1.Promise, deletePipelines []v1alpha1.Pipeline) (*ctrl.Result, error) { - promiseDeletePipelineExists := deletePipelines != nil +func ensurePromiseDeleteWorkflowFinalizer(o opts, promise *v1alpha1.Promise, promiseDeletePipelineExists bool) (*ctrl.Result, error) { promiseContainsDeleteWorkflowsFinalizer := controllerutil.ContainsFinalizer(promise, runDeleteWorkflowsFinalizer) promiseContainsRemoveAllWorkflowJobsFinalizer := controllerutil.ContainsFinalizer(promise, removeAllWorkflowJobsFinalizer) @@ -999,7 +957,7 @@ func setStatusFieldsOnCRD(rrCRD *apiextensionsv1.CustomResourceDefinition) { } func (r *PromiseReconciler) applyWorkForStaticDependencies(o opts, promise *v1alpha1.Promise) error { - name := resourceutil.GenerateObjectName(promise.GetName() + "-static-deps") + name := objectutil.GenerateObjectName(promise.GetName() + "-static-deps") work, err := v1alpha1.NewPromiseDependenciesWork(promise, name) if err != nil { return err diff --git a/controllers/promise_controller_test.go b/controllers/promise_controller_test.go index 90ae9eb3..e9e2b3f3 100644 --- a/controllers/promise_controller_test.go +++ b/controllers/promise_controller_test.go @@ -404,7 +404,7 @@ var _ = Describe("PromiseController", func() { Expect(promise.Finalizers).To(ContainElement("kratix.io/workflows-cleanup")) }) - resources := reconcileConfigureOptsArg.Pipelines[0].JobRequiredResources + resources := reconcileConfigureOptsArg.Resources[0].RequiredResources By("creates a service account for pipeline", func() { Expect(resources[0]).To(BeAssignableToTypeOf(&v1.ServiceAccount{})) sa := resources[0].(*v1.ServiceAccount) diff --git a/lib/resourceutil/name.go b/lib/objectutil/name.go similarity index 93% rename from lib/resourceutil/name.go rename to lib/objectutil/name.go index f46f4599..6dde256f 100644 --- a/lib/resourceutil/name.go +++ b/lib/objectutil/name.go @@ -1,4 +1,4 @@ -package resourceutil +package objectutil import ( "k8s.io/apimachinery/pkg/util/uuid" diff --git a/lib/resourceutil/name_test.go b/lib/objectutil/name_test.go similarity index 68% rename from lib/resourceutil/name_test.go rename to lib/objectutil/name_test.go index 576391e4..0e33c8e1 100644 --- a/lib/resourceutil/name_test.go +++ b/lib/objectutil/name_test.go @@ -1,33 +1,30 @@ -package resourceutil_test +package objectutil_test import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - - "github.com/syntasso/kratix/lib/resourceutil" + "github.com/syntasso/kratix/lib/objectutil" ) var _ = Describe("Name Utils", func() { When("the given name does not reach the maximum character length", func() { It("is appended with the sha", func() { - name := resourceutil.GenerateObjectName("a-short-name") + name := objectutil.GenerateObjectName("a-short-name") Expect(name).To(MatchRegexp(`^a-short-name-\b\w{5}\b$`)) }) }) When("the given name does exceeds maximum character length", func() { It("is shortened and appended with the sha", func() { - name := resourceutil.GenerateObjectName("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaatrimmed") + name := objectutil.GenerateObjectName("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaatrimmed") Expect(name).To(MatchRegexp(`^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-\b\w{5}\b$`)) - }) }) When("the given name is the character limit", func() { It("is shortened and appended with the sha", func() { - name := resourceutil.GenerateObjectName("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab") + name := objectutil.GenerateObjectName("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab") Expect(name).To(MatchRegexp(`^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab-\b\w{5}\b$`)) - }) }) }) diff --git a/lib/pipeline/pipeline_suite_test.go b/lib/objectutil/objectutil_suite_test.go similarity index 54% rename from lib/pipeline/pipeline_suite_test.go rename to lib/objectutil/objectutil_suite_test.go index dda444dc..6e362f49 100644 --- a/lib/pipeline/pipeline_suite_test.go +++ b/lib/objectutil/objectutil_suite_test.go @@ -1,4 +1,4 @@ -package pipeline_test +package objectutil_test import ( "testing" @@ -7,7 +7,7 @@ import ( . "github.com/onsi/gomega" ) -func TestPipeline(t *testing.T) { +func TestObjectutil(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Pipeline Suite") + RunSpecs(t, "Objectutil Suite") } diff --git a/lib/pipeline/args.go b/lib/pipeline/args.go deleted file mode 100644 index c2e5c83b..00000000 --- a/lib/pipeline/args.go +++ /dev/null @@ -1,90 +0,0 @@ -package pipeline - -type PipelineArgs struct { - names map[string]string -} - -func NewPipelineArgs(promiseIdentifier, resourceRequestIdentifier, pName, objectName, namespace string) PipelineArgs { - pipelineID := promiseIdentifier + "-promise-pipeline" - if resourceRequestIdentifier != "" { - pipelineID = promiseIdentifier + "-resource-pipeline" - } - - names := map[string]string{ - "configure-pipeline-name": pipelineName(promiseIdentifier, resourceRequestIdentifier, objectName, pName), - "delete-pipeline-name": pipelineName(promiseIdentifier, resourceRequestIdentifier, objectName, pName), - "promise-id": promiseIdentifier, - "service-account": pipelineID, - "role": pipelineID, - "role-binding": pipelineID, - "config-map": "destination-selectors-" + promiseIdentifier, - "resource-request-id": resourceRequestIdentifier, - "namespace": namespace, - "pipeline-name": pName, - "name": objectName, - } - - return PipelineArgs{ - names: names, - } -} - -func (p PipelineArgs) ConfigurePipelineJobLabels(objHash string) pipelineLabels { - resourceRequestID := p.names["resource-request-id"] - if resourceRequestID == "" { - return LabelsForConfigurePromise(p.PromiseID(), p.PipelineName(), objHash) - } - return LabelsForConfigureResource(resourceRequestID, p.Name(), p.PromiseID(), p.PipelineName(), objHash) -} - -func (p PipelineArgs) DeletePipelineJobLabels() pipelineLabels { - resourceRequestID := 
p.names["resource-request-id"] - if resourceRequestID == "" { - return LabelsForDeletePromise(p.PromiseID(), p.PipelineName()) - } - return LabelsForDeleteResource(resourceRequestID, p.Name(), p.PromiseID(), p.PipelineName()) -} - -func (p PipelineArgs) ConfigMapName() string { - return p.names["config-map"] -} - -func (p PipelineArgs) ServiceAccountName() string { - return p.names["service-account"] -} - -func (p PipelineArgs) RoleName() string { - return p.names["role"] -} - -func (p PipelineArgs) Name() string { - return p.names["name"] -} - -func (p PipelineArgs) RoleBindingName() string { - return p.names["role-binding"] -} - -func (p PipelineArgs) Namespace() string { - return p.names["namespace"] -} - -func (p PipelineArgs) PromiseID() string { - return p.names["promise-id"] -} - -func (p PipelineArgs) ConfigurePipelineName() string { - return p.names["configure-pipeline-name"] -} - -func (p PipelineArgs) DeletePipelineName() string { - return p.names["delete-pipeline-name"] -} - -func (p PipelineArgs) PipelineName() string { - return p.names["pipeline-name"] -} - -func (p PipelineArgs) Labels() pipelineLabels { - return newPipelineLabels().WithPromiseID(p.PromiseID()) -} diff --git a/lib/pipeline/assets/promise.yaml b/lib/pipeline/assets/promise.yaml deleted file mode 100644 index a80ad8b0..00000000 --- a/lib/pipeline/assets/promise.yaml +++ /dev/null @@ -1,85 +0,0 @@ -apiVersion: platform.kratix.io/v1alpha1 -kind: Promise -metadata: - name: custom-namespace - labels: - kratix.io/promise-version: v1.2.0 - clashing-label: "new-promise-v2-value" - new-promise-v2-label: "value" - annotations: - clashing-annotation: "new-promise-v2-value" - new-promise-v2-annotation: "value" -spec: - api: - apiVersion: apiextensions.k8s.io/v1 - kind: CustomResourceDefinition - metadata: - name: custom-namespaces.marketplace.kratix.io - spec: - group: marketplace.kratix.io - names: - kind: custom-namespace - plural: custom-namespaces - singular: custom-namespace - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - spec: - properties: - clusterSelectors: - description: | - List of key:value pairs to use as cluster selectors when scheduling - type: object - x-kubernetes-preserve-unknown-fields: true - newConfig: - default: "1" - description: example config - type: string - type: object - type: object - served: true - storage: true - dependencies: - - apiVersion: v1 - kind: Namespace - metadata: - name: custom - destinationSelectors: - - matchLabels: - environment: dev - workflows: - resource: - configure: - - apiVersion: platform.kratix.io/v1alpha1 - kind: Pipeline - metadata: - name: instance-configure - namespace: default - spec: - containers: - - image: syntasso/demo-custom-namespace-configure-pipeline:v1.1.0 - name: demo-custom-namespace-resource-configure-pipeline - delete: - - apiVersion: platform.kratix.io/v1alpha1 - kind: Pipeline - metadata: - name: instance-delete - namespace: default - spec: - containers: - - image: syntasso/demo-custom-namespace-delete-pipeline:v1.1.0 - name: demo-custom-namespace-resource-delete-pipeline - promise: - delete: - - apiVersion: platform.kratix.io/v1alpha1 - kind: Pipeline - metadata: - name: promise-delete - namespace: default - spec: - containers: - - image: syntasso/demo-custom-namespace-delete-pipeline:v1.1.0 - name: demo-custom-namespace-promise-delete-pipeline diff --git a/lib/pipeline/assets/resource-request.yaml b/lib/pipeline/assets/resource-request.yaml deleted file mode 100644 index e3dce3d6..00000000 --- 
a/lib/pipeline/assets/resource-request.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: marketplace.kratix.io/v1alpha1 -kind: custom-namespace -metadata: - name: example - namespace: default -spec: - newConfig: "1" diff --git a/lib/pipeline/configure.go b/lib/pipeline/configure.go deleted file mode 100644 index 67080638..00000000 --- a/lib/pipeline/configure.go +++ /dev/null @@ -1,240 +0,0 @@ -package pipeline - -import ( - "fmt" - "os" - "strings" - - "github.com/go-logr/logr" - "github.com/syntasso/kratix/api/v1alpha1" - "github.com/syntasso/kratix/lib/hash" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -func NewConfigureResource( - rr *unstructured.Unstructured, - promise *unstructured.Unstructured, - crdPlural string, - pipeline v1alpha1.Pipeline, - resourceRequestIdentifier, - promiseIdentifier string, - promiseDestinationSelectors []v1alpha1.PromiseScheduling, - logger logr.Logger, -) ([]client.Object, error) { - - pipelineResources := NewPipelineArgs(promiseIdentifier, resourceRequestIdentifier, pipeline.Name, rr.GetName(), rr.GetNamespace()) - destinationSelectorsConfigMap, err := destinationSelectorsConfigMap(pipelineResources, promiseDestinationSelectors, nil) - if err != nil { - return nil, err - } - - promiseHash, err := hash.ComputeHashForResource(promise) - if err != nil { - return nil, err - } - - objHash, err := hash.ComputeHashForResource(rr) - if err != nil { - return nil, err - } - - combinedHash := hash.ComputeHash(fmt.Sprintf("%s-%s", promiseHash, objHash)) - - job, err := ConfigurePipeline(rr, combinedHash, pipeline, pipelineResources, promiseIdentifier, false, logger) - if err != nil { - return nil, err - } - - resources := []client.Object{ - serviceAccount(pipelineResources), - role(rr, crdPlural, pipelineResources), - roleBinding(pipelineResources), - destinationSelectorsConfigMap, - job, - } - - return resources, nil -} - -func NewConfigurePromise( - uPromise *unstructured.Unstructured, - p v1alpha1.Pipeline, - promiseIdentifier string, - promiseDestinationSelectors []v1alpha1.PromiseScheduling, - logger logr.Logger, -) ([]client.Object, error) { - - pipelineResources := NewPipelineArgs(promiseIdentifier, "", p.Name, uPromise.GetName(), v1alpha1.SystemNamespace) - destinationSelectorsConfigMap, err := destinationSelectorsConfigMap(pipelineResources, promiseDestinationSelectors, nil) - if err != nil { - return nil, err - } - - objHash, err := hash.ComputeHashForResource(uPromise) - if err != nil { - return nil, err - } - - pipeline, err := ConfigurePipeline(uPromise, objHash, p, pipelineResources, promiseIdentifier, true, logger) - if err != nil { - return nil, err - } - - resources := []client.Object{ - serviceAccount(pipelineResources), - clusterRole(pipelineResources), - clusterRoleBinding(pipelineResources), - destinationSelectorsConfigMap, - pipeline, - } - - return resources, nil -} - -func ConfigurePipeline(obj *unstructured.Unstructured, objHash string, pipeline v1alpha1.Pipeline, pipelineArgs PipelineArgs, promiseName string, promiseWorkflow bool, logger logr.Logger) (*batchv1.Job, error) { - volumes := metadataAndSchedulingVolumes(pipelineArgs.ConfigMapName()) - - initContainers, pipelineVolumes := generateConfigurePipelineContainersAndVolumes(obj, pipeline, promiseName, promiseWorkflow, 
logger) - volumes = append(volumes, pipelineVolumes...) - - objHash, err := hash.ComputeHashForResource(obj) - if err != nil { - return nil, err - } - - var imagePullSecrets []v1.LocalObjectReference - workCreatorPullSecrets := os.Getenv("WC_PULL_SECRET") - if workCreatorPullSecrets != "" { - imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: workCreatorPullSecrets}) - } - - imagePullSecrets = append(imagePullSecrets, pipeline.Spec.ImagePullSecrets...) - - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: pipelineArgs.ConfigurePipelineName(), - Namespace: pipelineArgs.Namespace(), - Labels: pipelineArgs.ConfigurePipelineJobLabels(objHash), - }, - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: pipelineArgs.ConfigurePipelineJobLabels(objHash), - }, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyOnFailure, - ServiceAccountName: pipelineArgs.ServiceAccountName(), - Containers: []v1.Container{ - { - Name: "status-writer", - Image: os.Getenv("WC_IMG"), - Command: []string{"sh", "-c", "update-status"}, - Env: []v1.EnvVar{ - {Name: "OBJECT_KIND", Value: strings.ToLower(obj.GetKind())}, - {Name: "OBJECT_GROUP", Value: obj.GroupVersionKind().Group}, - {Name: "OBJECT_NAME", Value: obj.GetName()}, - {Name: "OBJECT_NAMESPACE", Value: pipelineArgs.Namespace()}, - }, - VolumeMounts: []v1.VolumeMount{{ - MountPath: "/work-creator-files/metadata", - Name: "shared-metadata", - }}, - }, - }, - ImagePullSecrets: imagePullSecrets, - InitContainers: initContainers, - Volumes: volumes, - }, - }, - }, - } - - if err := controllerutil.SetControllerReference(obj, job, scheme.Scheme); err != nil { - logger.Error(err, "Error setting ownership") - return nil, err - } - return job, nil -} - -func generateConfigurePipelineContainersAndVolumes(obj *unstructured.Unstructured, pipeline v1alpha1.Pipeline, promiseName string, promiseWorkflow bool, logger logr.Logger) ([]v1.Container, []v1.Volume) { - workflowType := v1alpha1.WorkflowTypeResource - if promiseWorkflow { - workflowType = v1alpha1.WorkflowTypePromise - } - - kratixEnvVars := []v1.EnvVar{ - { - Name: kratixActionEnvVar, - Value: string(v1alpha1.WorkflowActionConfigure), - }, - { - Name: kratixTypeEnvVar, - Value: string(workflowType), - }, - { - Name: kratixPromiseEnvVar, - Value: promiseName, - }, - } - - containers, volumes := generateContainersAndVolumes(obj, workflowType, pipeline, kratixEnvVars) - - workCreatorCommand := fmt.Sprintf("./work-creator -input-directory /work-creator-files -promise-name %s -pipeline-name %s", promiseName, pipeline.Name) - if promiseWorkflow { - workCreatorCommand += fmt.Sprintf(" -namespace %s -workflow-type %s", v1alpha1.SystemNamespace, v1alpha1.WorkflowTypePromise) - } else { - workCreatorCommand += fmt.Sprintf(" -namespace %s -resource-name %s -workflow-type %s", obj.GetNamespace(), obj.GetName(), v1alpha1.WorkflowTypeResource) - } - - writer := v1.Container{ - Name: "work-writer", - Image: os.Getenv("WC_IMG"), - Command: []string{"sh", "-c", workCreatorCommand}, - VolumeMounts: []v1.VolumeMount{ - { - MountPath: "/work-creator-files/input", - Name: "shared-output", - }, - { - MountPath: "/work-creator-files/metadata", - Name: "shared-metadata", - }, - { - MountPath: "/work-creator-files/kratix-system", - Name: "promise-scheduling", // this volumemount is a configmap - }, - }, - } - - containers = append(containers, writer) - - return containers, volumes -} - -func metadataAndSchedulingVolumes(configMapName string) []v1.Volume { - 
return []v1.Volume{ - { - Name: "metadata", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, - }, - { - Name: "promise-scheduling", - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: configMapName, - }, - Items: []v1.KeyToPath{{ - Key: "destinationSelectors", - Path: "promise-scheduling", - }}, - }, - }, - }, - } -} diff --git a/lib/pipeline/configure_test.go b/lib/pipeline/configure_test.go deleted file mode 100644 index e4c6263a..00000000 --- a/lib/pipeline/configure_test.go +++ /dev/null @@ -1,323 +0,0 @@ -package pipeline_test - -import ( - "os" - - "github.com/go-logr/logr" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - . "github.com/onsi/gomega/gstruct" - "github.com/onsi/gomega/types" - - "github.com/syntasso/kratix/api/v1alpha1" - "github.com/syntasso/kratix/lib/pipeline" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -var _ = Describe("Configure Pipeline", func() { - var ( - rr *unstructured.Unstructured - p v1alpha1.Pipeline - pipelineResources pipeline.PipelineArgs - logger logr.Logger - job *batchv1.Job - err error - labelsMatcher types.GomegaMatcher - ) - - BeforeEach(func() { - rr = &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "test-pod", - "namespace": "test-namespace", - }, - "spec": map[string]interface{}{ - "foo": "bar", - }, - }, - } - - p = v1alpha1.Pipeline{ - ObjectMeta: metav1.ObjectMeta{ - Name: "configure-step", - }, - Spec: v1alpha1.PipelineSpec{ - Containers: []v1alpha1.Container{ - {Name: "test-container", Image: "test-image"}, - }, - }, - } - logger = logr.Logger{} - - pipelineResources = pipeline.NewPipelineArgs("test-promise", "", "configure-step", "test-name", "test-namespace") - }) - - Describe("Promise Configure Pipeline", func() { - const expectedHash = "9bb58f26192e4ba00f01e2e7b136bbd8" - BeforeEach(func() { - job, err = pipeline.ConfigurePipeline(rr, expectedHash, p, pipelineResources, "test-promise", false, logger) - Expect(err).NotTo(HaveOccurred()) - - labelsMatcher = MatchAllKeys(Keys{ - "kratix.io/hash": Equal(expectedHash), - "kratix-workflow-action": Equal("configure"), - "kratix-workflow-pipeline-name": Equal("configure-step"), - "kratix.io/pipeline-name": Equal("configure-step"), - "kratix-workflow-type": Equal("promise"), - "kratix-workflow-kind": Equal("pipeline.platform.kratix.io"), - "kratix-workflow-promise-version": Equal("v1alpha1"), - "kratix.io/work-type": Equal("promise"), - "kratix.io/promise-name": Equal("test-promise"), - }) - }) - - It("creates a job with the expected metadata", func() { - Expect(job.ObjectMeta).To(MatchFields(IgnoreExtras, Fields{ - "Name": HavePrefix("kratix-test-promise-configure-step-"), - "Namespace": Equal("test-namespace"), - "Labels": labelsMatcher, - })) - }) - - Context("when the pipeline name would exceed the 63 character limit", func() { - BeforeEach(func() { - promiseIdentifier := "long-long-long-long-promise" - pipelineName := "also-very-verbose-pipeline" - pipelineResources = pipeline.NewPipelineArgs(promiseIdentifier, "", pipelineName, "test-name", "test-namespace") - - job, err = pipeline.ConfigurePipeline(rr, expectedHash, p, pipelineResources, "test-promise", false, logger) - - labelsMatcher = MatchAllKeys(Keys{ - "kratix.io/hash": Equal(expectedHash), - 
"kratix-workflow-action": Equal("configure"), - "kratix-workflow-pipeline-name": Equal(pipelineName), - "kratix.io/pipeline-name": Equal(pipelineName), - "kratix-workflow-type": Equal("promise"), - "kratix-workflow-kind": Equal("pipeline.platform.kratix.io"), - "kratix-workflow-promise-version": Equal("v1alpha1"), - "kratix.io/work-type": Equal("promise"), - "kratix.io/promise-name": Equal(promiseIdentifier), - }) - }) - - It("truncates the pipeline name to ensure it fits the 63 character limit", func() { - Expect(job.ObjectMeta.Name).To(HaveLen(62)) - Expect(job.ObjectMeta).To(MatchFields(IgnoreExtras, Fields{ - "Name": HavePrefix("kratix-long-long-long-long-promise-also-very-verbose-pip-"), - "Namespace": Equal("test-namespace"), - "Labels": labelsMatcher, - })) - }) - }) - }) - - Describe("Resource Configure Pipeline", func() { - const expectedHash = "9bb58f26192e4ba00f01e2e7b136bbd8" - BeforeEach(func() { - pipelineResources = pipeline.NewPipelineArgs("test-promise", "test-promise-test-rr", "configure-step", "test-rr", "test-namespace") - job, err = pipeline.ConfigurePipeline(rr, expectedHash, p, pipelineResources, "test-promise", false, logger) - Expect(err).NotTo(HaveOccurred()) - - labelsMatcher = MatchAllKeys(Keys{ - "kratix.io/hash": Equal(expectedHash), - "kratix-workflow-action": Equal("configure"), - "kratix-workflow-pipeline-name": Equal("configure-step"), - "kratix.io/pipeline-name": Equal("configure-step"), - "kratix-workflow-type": Equal("resource"), - "kratix-workflow-kind": Equal("pipeline.platform.kratix.io"), - "kratix-workflow-promise-version": Equal("v1alpha1"), - "kratix.io/work-type": Equal("resource"), - "kratix.io/promise-name": Equal("test-promise"), - "kratix-promise-resource-request-id": Equal("test-promise-test-rr"), - "kratix.io/resource-name": Equal("test-rr"), - }) - }) - - It("creates a job with the expected metadata", func() { - Expect(job.ObjectMeta).To(MatchFields(IgnoreExtras, Fields{ - "Name": HavePrefix("kratix-test-promise-test-rr-configure-step-"), - "Namespace": Equal("test-namespace"), - "Labels": labelsMatcher, - })) - }) - - Context("when the pipeline name would exceed the 63 character limit", func() { - BeforeEach(func() { - promiseIdentifier := "long-long-long-long-promise" - pipelineResources = pipeline.NewPipelineArgs(promiseIdentifier, "long-long-long-long-promise-test-resource", "configure-step", "test-resource", "test-namespace") - - job, err = pipeline.ConfigurePipeline(rr, expectedHash, p, pipelineResources, "test-promise", false, logger) - - labelsMatcher = MatchAllKeys(Keys{ - "kratix.io/hash": Equal(expectedHash), - "kratix-workflow-action": Equal("configure"), - "kratix-workflow-pipeline-name": Equal("configure-step"), - "kratix.io/pipeline-name": Equal("configure-step"), - "kratix-workflow-type": Equal("resource"), - "kratix-workflow-kind": Equal("pipeline.platform.kratix.io"), - "kratix-workflow-promise-version": Equal("v1alpha1"), - "kratix.io/work-type": Equal("resource"), - "kratix.io/promise-name": Equal(promiseIdentifier), - "kratix-promise-resource-request-id": Equal("long-long-long-long-promise-test-resource"), - "kratix.io/resource-name": Equal("test-resource"), - }) - }) - - It("truncates the pipeline name to ensure it fits the 63 character limit", func() { - Expect(job.ObjectMeta.Name).To(HaveLen(62)) - Expect(job.ObjectMeta).To(MatchFields(IgnoreExtras, Fields{ - "Name": HavePrefix("kratix-long-long-long-long-promise-test-resource-configu-"), - "Namespace": Equal("test-namespace"), - "Labels": labelsMatcher, - })) - }) 
- }) - }) - - Describe("WorkWriter", func() { - When("its a promise", func() { - It("runs the work-creator with the correct arguments", func() { - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - Args: []string{"arg1", "arg2"}, - Command: []string{"command1", "command2"}, - }) - job, err := pipeline.ConfigurePipeline(rr, "hash", p, pipelineResources, "test-promise", true, logger) - Expect(err).NotTo(HaveOccurred()) - - Expect(job.Spec.Template.Spec.InitContainers[3].Command).To(ConsistOf( - "sh", - "-c", - "./work-creator -input-directory /work-creator-files -promise-name test-promise -pipeline-name configure-step -namespace kratix-platform-system -workflow-type promise", - )) - }) - }) - - When("its a resource request", func() { - It("runs the work-creator with the correct arguments", func() { - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - Args: []string{"arg1", "arg2"}, - Command: []string{"command1", "command2"}, - }) - job, err := pipeline.ConfigurePipeline(rr, "hash", p, pipelineResources, "test-promise", false, logger) - Expect(err).NotTo(HaveOccurred()) - - Expect(job.Spec.Template.Spec.InitContainers[3].Command).To(ConsistOf( - "sh", - "-c", - "./work-creator -input-directory /work-creator-files -promise-name test-promise -pipeline-name configure-step -namespace test-namespace -resource-name test-pod -workflow-type resource", - )) - }) - }) - }) - - Describe("optional workflow configs", func() { - It("can include args and commands", func() { - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - Args: []string{"arg1", "arg2"}, - Command: []string{"command1", "command2"}, - }) - job, err := pipeline.ConfigurePipeline(rr, "hash", p, pipelineResources, "test-promise", false, logger) - Expect(err).NotTo(HaveOccurred()) - - Expect(job.Spec.Template.Spec.InitContainers[1].Args).To(BeEmpty()) - Expect(job.Spec.Template.Spec.InitContainers[1].Command).To(BeEmpty()) - Expect(job.Spec.Template.Spec.InitContainers[2].Args).To(Equal([]string{"arg1", "arg2"})) - Expect(job.Spec.Template.Spec.InitContainers[2].Command).To(Equal([]string{"command1", "command2"})) - }) - - It("can include env and envFrom", func() { - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - Env: []corev1.EnvVar{ - {Name: "env1", Value: "value1"}, - }, - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: "test-configmap"}, - }, - }, - }, - }) - job, err := pipeline.ConfigurePipeline(rr, "hash", p, pipelineResources, "test-promise", false, logger) - Expect(err).NotTo(HaveOccurred()) - - Expect(job.Spec.Template.Spec.InitContainers[1].Env).To(ContainElements( - corev1.EnvVar{Name: "KRATIX_WORKFLOW_ACTION", Value: "configure"}, - corev1.EnvVar{Name: "KRATIX_WORKFLOW_TYPE", Value: "resource"}, - )) - Expect(job.Spec.Template.Spec.InitContainers[2].Env).To(ContainElements( - corev1.EnvVar{Name: "KRATIX_WORKFLOW_ACTION", Value: "configure"}, - corev1.EnvVar{Name: "KRATIX_WORKFLOW_TYPE", Value: "resource"}, - corev1.EnvVar{Name: "env1", Value: "value1"}, - )) - - Expect(job.Spec.Template.Spec.InitContainers[1].EnvFrom).To(BeNil()) - Expect(job.Spec.Template.Spec.InitContainers[2].EnvFrom).To(ContainElements( - corev1.EnvFromSource{ - ConfigMapRef: 
&corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: "test-configmap"}, - }, - }, - )) - }) - - It("can include volume and volume mounts", func() { - p.Spec.Volumes = []corev1.Volume{ - {Name: "test-volume", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, - } - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - VolumeMounts: []corev1.VolumeMount{ - {Name: "test-volume-mount", MountPath: "/test-mount-path"}, - }, - }) - job, err := pipeline.ConfigurePipeline(rr, "hash", p, pipelineResources, "test-promise", false, logger) - Expect(err).NotTo(HaveOccurred()) - - Expect(job.Spec.Template.Spec.InitContainers[1].VolumeMounts).To(HaveLen(3), "default volume mounts should've been included") - Expect(job.Spec.Template.Spec.InitContainers[1].Command).To(BeEmpty()) - Expect(job.Spec.Template.Spec.InitContainers[2].VolumeMounts).To(ContainElement( - corev1.VolumeMount{Name: "test-volume-mount", MountPath: "/test-mount-path"}, - )) - Expect(job.Spec.Template.Spec.Volumes).To(ContainElement( - corev1.Volume{Name: "test-volume", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, - )) - }) - - It("can include imagePullPolicy and imagePullSecrets", func() { - os.Setenv("WC_PULL_SECRET", "registry-secret") - p.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: "test-secret"}, {Name: "another-secret"}} - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - ImagePullPolicy: corev1.PullAlways, - }) - job, err := pipeline.ConfigurePipeline(rr, "hash", p, pipelineResources, "test-promise", false, logger) - Expect(err).NotTo(HaveOccurred()) - - Expect(job.Spec.Template.Spec.ImagePullSecrets).To(HaveLen(3), "imagePullSecrets should've been included") - Expect(job.Spec.Template.Spec.ImagePullSecrets).To(ContainElements( - corev1.LocalObjectReference{Name: "registry-secret"}, - corev1.LocalObjectReference{Name: "test-secret"}, - corev1.LocalObjectReference{Name: "another-secret"}, - ), "imagePullSecrets should've been included") - Expect(job.Spec.Template.Spec.InitContainers[1].ImagePullPolicy).To(BeEmpty()) - Expect(job.Spec.Template.Spec.InitContainers[2].ImagePullPolicy).To(Equal(corev1.PullAlways)) - }) - }) -}) diff --git a/lib/pipeline/delete.go b/lib/pipeline/delete.go deleted file mode 100644 index a542c48a..00000000 --- a/lib/pipeline/delete.go +++ /dev/null @@ -1,83 +0,0 @@ -package pipeline - -import ( - "github.com/syntasso/kratix/api/v1alpha1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const kratixActionDelete = "delete" - -func NewDeleteResource(rr *unstructured.Unstructured, pipeline v1alpha1.Pipeline, resourceRequestIdentifier, promiseIdentifier, crdPlural string) []client.Object { - return NewDelete(rr, pipeline, resourceRequestIdentifier, promiseIdentifier, crdPlural) -} - -func NewDeletePromise(promise *unstructured.Unstructured, pipeline v1alpha1.Pipeline) []client.Object { - return NewDelete(promise, pipeline, "", promise.GetName(), v1alpha1.PromisePlural) -} - -func NewDelete(obj *unstructured.Unstructured, pipeline v1alpha1.Pipeline, resourceRequestIdentifier, promiseIdentifier, objPlural string) []client.Object { - isPromise := resourceRequestIdentifier == "" - namespace := 
obj.GetNamespace() - if isPromise { - namespace = v1alpha1.SystemNamespace - } - - args := NewPipelineArgs(promiseIdentifier, resourceRequestIdentifier, pipeline.Name, obj.GetName(), namespace) - - containers, pipelineVolumes := generateDeletePipelineContainersAndVolumes(obj, isPromise, pipeline) - - imagePullSecrets := pipeline.Spec.ImagePullSecrets - - resources := []client.Object{ - serviceAccount(args), - role(obj, objPlural, args), - roleBinding(args), - &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: args.DeletePipelineName(), - Namespace: args.Namespace(), - Labels: args.DeletePipelineJobLabels(), - }, - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: args.DeletePipelineJobLabels(), - }, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyOnFailure, - ServiceAccountName: args.ServiceAccountName(), - Containers: []v1.Container{containers[len(containers)-1]}, - InitContainers: containers[0 : len(containers)-1], - Volumes: pipelineVolumes, - ImagePullSecrets: imagePullSecrets, - }, - }, - }, - }, - } - - return resources -} - -func generateDeletePipelineContainersAndVolumes(obj *unstructured.Unstructured, isPromise bool, pipeline v1alpha1.Pipeline) ([]v1.Container, []v1.Volume) { - workflowType := v1alpha1.WorkflowTypeResource - if isPromise { - workflowType = v1alpha1.WorkflowTypePromise - } - kratixEnvVars := []v1.EnvVar{ - { - Name: kratixActionEnvVar, - Value: kratixActionDelete, - }, - { - Name: kratixTypeEnvVar, - Value: string(workflowType), - }, - } - - return generateContainersAndVolumes(obj, workflowType, pipeline, kratixEnvVars) -} diff --git a/lib/pipeline/delete_test.go b/lib/pipeline/delete_test.go deleted file mode 100644 index 0062b2f5..00000000 --- a/lib/pipeline/delete_test.go +++ /dev/null @@ -1,629 +0,0 @@ -package pipeline_test - -import ( - "os" - - "github.com/go-logr/logr" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "sigs.k8s.io/controller-runtime/pkg/client" - - . 
"github.com/onsi/gomega/gstruct" - "github.com/syntasso/kratix/api/v1alpha1" - "github.com/syntasso/kratix/lib/pipeline" -) - -var _ = Describe("Delete Pipeline", func() { - var ( - logger = logr.Discard() - pipelineResources []client.Object - serviceAccount v1.ServiceAccount - role rbacv1.Role - roleBinding rbacv1.RoleBinding - job batchv1.Job - ) - - const ( - promisePath = "assets/promise.yaml" - resourceRequestPath = "assets/resource-request.yaml" - ) - - Describe("Promise", func() { - var ( - expectedObjectMeta = metav1.ObjectMeta{ - Name: "custom-namespace-promise-pipeline", - Namespace: "kratix-platform-system", - Labels: map[string]string{ - "kratix.io/promise-name": "custom-namespace", - }, - } - ) - - When("delete pipeline resources are generated", func() { - BeforeEach(func() { - promise := promiseFromFile(promisePath) - unstructuredPromise, err := promise.ToUnstructured() - Expect(err).ToNot(HaveOccurred()) - - pipelines, err := promise.GeneratePipelines(logger) - Expect(err).ToNot(HaveOccurred()) - - pipelineResources = pipeline.NewDeletePromise( - unstructuredPromise, - pipelines.DeletePromise[0], - ) - }) - - It("creates a Job, ServiceAccount, Role, and RoleBinding", func() { - Expect(pipelineResources).To(HaveLen(4)) - - Expect(pipelineResources[0]).To(BeAssignableToTypeOf(&v1.ServiceAccount{})) - Expect(pipelineResources[1]).To(BeAssignableToTypeOf(&rbacv1.Role{})) - Expect(pipelineResources[2]).To(BeAssignableToTypeOf(&rbacv1.RoleBinding{})) - Expect(pipelineResources[3]).To(BeAssignableToTypeOf(&batchv1.Job{})) - - //TODO: move testing of service account, role, and role binding to shared_test.go - }) - - It("creates the ServiceAccount with the right metadata", func() { - serviceAccount = *pipelineResources[0].(*v1.ServiceAccount) - expectedServiceAccount := v1.ServiceAccount{ - ObjectMeta: expectedObjectMeta, - } - Expect(serviceAccount).To(Equal(expectedServiceAccount)) - }) - - It("creates the Role with the right metadata and rules", func() { - role = *pipelineResources[1].(*rbacv1.Role) - expectedRole := rbacv1.Role{ - ObjectMeta: expectedObjectMeta, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{"platform.kratix.io"}, - Resources: []string{"promises", "promises/status"}, - Verbs: []string{"get", "list", "update", "create", "patch"}, - }, - { - APIGroups: []string{"platform.kratix.io"}, - Resources: []string{"works"}, - Verbs: []string{"*"}, - }, - }, - } - Expect(role).To(Equal(expectedRole)) - }) - - It("creates the RoleBinding with the right metadata, roleRef, and subjects", func() { - roleBinding = *pipelineResources[2].(*rbacv1.RoleBinding) - expectedRoleBinding := rbacv1.RoleBinding{ - ObjectMeta: expectedObjectMeta, - RoleRef: rbacv1.RoleRef{ - Kind: "Role", - APIGroup: "rbac.authorization.k8s.io", - Name: "custom-namespace-promise-pipeline", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Namespace: "kratix-platform-system", - Name: "custom-namespace-promise-pipeline", - }, - }, - } - Expect(roleBinding).To(Equal(expectedRoleBinding)) - }) - - It("creates the Job with the right metadata and spec", func() { - job = *pipelineResources[3].(*batchv1.Job) - - labelsMatcher := MatchAllKeys(Keys{ - "kratix-workflow-kind": Equal("pipeline.platform.kratix.io"), - "kratix-workflow-promise-version": Equal("v1alpha1"), - "kratix-workflow-type": Equal("promise"), - "kratix-workflow-action": Equal("delete"), - "kratix.io/promise-name": Equal("custom-namespace"), - "kratix.io/pipeline-name": Equal("promise-delete"), - "kratix.io/work-type": 
Equal("promise"), - "kratix-workflow-pipeline-name": Equal("promise-delete"), - }) - - Expect(job).To(MatchFields(IgnoreExtras, Fields{ - "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Name": HavePrefix("kratix-custom-namespace-promise-delete-"), - "Namespace": Equal("kratix-platform-system"), - "Labels": labelsMatcher, - }), - "Spec": MatchFields(IgnoreExtras, Fields{ - "Template": MatchFields(IgnoreExtras, Fields{ - "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Labels": labelsMatcher, - }), - "Spec": MatchFields(IgnoreExtras, Fields{ - "RestartPolicy": Equal(v1.RestartPolicyOnFailure), - "ServiceAccountName": Equal("custom-namespace-promise-pipeline"), - "Containers": MatchAllElementsWithIndex(IndexIdentity, Elements{ - "0": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("demo-custom-namespace-promise-delete-pipeline"), - "Image": Equal("syntasso/demo-custom-namespace-delete-pipeline:v1.1.0"), - "VolumeMounts": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/input"), - "Name": Equal("shared-input"), - "ReadOnly": Equal(true), - }), - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/output"), - "Name": Equal("shared-output"), - }), - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/metadata"), - "Name": Equal("shared-metadata"), - }), - ), - "Env": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("KRATIX_WORKFLOW_ACTION"), - "Value": Equal("delete"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("KRATIX_WORKFLOW_TYPE"), - "Value": Equal("promise"), - }), - ), - }), - }), - "InitContainers": MatchAllElementsWithIndex(IndexIdentity, Elements{ - "0": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("reader"), - "Env": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_KIND"), - "Value": Equal("promise"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_GROUP"), - "Value": Equal("platform.kratix.io"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_NAME"), - "Value": Equal("custom-namespace"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_NAMESPACE"), - "Value": Equal("kratix-platform-system"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("KRATIX_WORKFLOW_TYPE"), - "Value": Equal("promise"), - }), - ), - "VolumeMounts": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/input"), - "Name": Equal("shared-input"), - }), - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/output"), - "Name": Equal("shared-output"), - }), - ), - "Command": ConsistOf( - Equal("sh"), - Equal("-c"), - Equal("reader"), - ), - }), - })}, - ), - }), - }), - })) - }) - - Context("the pipeline name would exceed the 63 character limit", func() { - BeforeEach(func() { - promise := promiseFromFile(promisePath) - promise.SetName("long-long-long-long-long-long-promise") - unstructuredPromise, err := promise.ToUnstructured() - Expect(err).ToNot(HaveOccurred()) - - pipelines, err := promise.GeneratePipelines(logger) - Expect(err).ToNot(HaveOccurred()) - - pipelineResources = pipeline.NewDeletePromise( - unstructuredPromise, - pipelines.DeletePromise[0], - ) - }) - - It("truncates the pipeline name to ensure it fits the 63 character limit", func() { - job = *pipelineResources[3].(*batchv1.Job) - Expect(job.ObjectMeta.Name).To(HaveLen(62)) - Expect(job.ObjectMeta.Name).To(HavePrefix("kratix-long-long-long-long-long-long-promise-promise-del-")) - }) - }) - }) - }) - - Describe("Resource", func() { - var ( - 
expectedObjectMeta = metav1.ObjectMeta{ - Name: "custom-namespace-resource-pipeline", - Namespace: "default", - Labels: map[string]string{ - "kratix.io/promise-name": "custom-namespace", - }, - } - ) - - When("delete pipeline resources are generated", func() { - BeforeEach(func() { - promise := promiseFromFile(promisePath) - resourceRequest := resourceRequestFromFile(resourceRequestPath) - - pipelines, err := promise.GeneratePipelines(logger) - Expect(err).ToNot(HaveOccurred()) - - pipelineResources = pipeline.NewDeleteResource( - resourceRequest, - pipelines.DeleteResource[0], - "example-ns", - "custom-namespace", - "custom-namespaces", - ) - }) - - It("creates a Job, ServiceAccount, Role, and RoleBinding", func() { - Expect(pipelineResources).To(HaveLen(4)) - - Expect(pipelineResources[0]).To(BeAssignableToTypeOf(&v1.ServiceAccount{})) - Expect(pipelineResources[1]).To(BeAssignableToTypeOf(&rbacv1.Role{})) - Expect(pipelineResources[2]).To(BeAssignableToTypeOf(&rbacv1.RoleBinding{})) - Expect(pipelineResources[3]).To(BeAssignableToTypeOf(&batchv1.Job{})) - - //TODO: move testing of service account, role, and role binding to shared_test.go - }) - - It("creates the ServiceAccount with the right metadata", func() { - serviceAccount = *pipelineResources[0].(*v1.ServiceAccount) - expectedServiceAccount := v1.ServiceAccount{ - ObjectMeta: expectedObjectMeta, - } - Expect(serviceAccount).To(Equal(expectedServiceAccount)) - }) - - It("creates the Role with the right metadata and rules", func() { - role = *pipelineResources[1].(*rbacv1.Role) - expectedRole := rbacv1.Role{ - ObjectMeta: expectedObjectMeta, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{"marketplace.kratix.io"}, - Resources: []string{"custom-namespaces", "custom-namespaces/status"}, - Verbs: []string{"get", "list", "update", "create", "patch"}, - }, - { - APIGroups: []string{"platform.kratix.io"}, - Resources: []string{"works"}, - Verbs: []string{"*"}, - }, - }, - } - Expect(role).To(Equal(expectedRole)) - }) - - It("creates the RoleBinding with the right metadata, roleRef, and subjects", func() { - roleBinding = *pipelineResources[2].(*rbacv1.RoleBinding) - expectedRoleBinding := rbacv1.RoleBinding{ - ObjectMeta: expectedObjectMeta, - RoleRef: rbacv1.RoleRef{ - Kind: "Role", - APIGroup: "rbac.authorization.k8s.io", - Name: "custom-namespace-resource-pipeline", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Namespace: "default", - Name: "custom-namespace-resource-pipeline", - }, - }, - } - Expect(roleBinding).To(Equal(expectedRoleBinding)) - }) - - It("creates the Job with the right metadata and spec", func() { - job = *pipelineResources[3].(*batchv1.Job) - - labelsMatcher := MatchAllKeys(Keys{ - "kratix-workflow-kind": Equal("pipeline.platform.kratix.io"), - "kratix-workflow-promise-version": Equal("v1alpha1"), - "kratix-workflow-type": Equal("resource"), - "kratix-workflow-action": Equal("delete"), - "kratix.io/promise-name": Equal("custom-namespace"), - "kratix-promise-resource-request-id": Equal("example-ns"), - "kratix-workflow-pipeline-name": Equal("instance-delete"), - "kratix.io/pipeline-name": Equal("instance-delete"), - "kratix.io/work-type": Equal("resource"), - "kratix.io/resource-name": Equal("example"), - }) - - Expect(job).To(MatchFields(IgnoreExtras, Fields{ - "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Name": HavePrefix("kratix-custom-namespace-example-instance-delete-"), - "Namespace": Equal("default"), - "Labels": labelsMatcher, - }), - "Spec": MatchFields(IgnoreExtras, Fields{ 
- "Template": MatchFields(IgnoreExtras, Fields{ - "ObjectMeta": MatchFields(IgnoreExtras, Fields{ - "Labels": labelsMatcher, - }), - "Spec": MatchFields(IgnoreExtras, Fields{ - "RestartPolicy": Equal(v1.RestartPolicyOnFailure), - "ServiceAccountName": Equal("custom-namespace-resource-pipeline"), - "Containers": MatchAllElementsWithIndex(IndexIdentity, Elements{ - "0": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("demo-custom-namespace-resource-delete-pipeline"), - "Image": Equal("syntasso/demo-custom-namespace-delete-pipeline:v1.1.0"), - "VolumeMounts": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/input"), - "Name": Equal("shared-input"), - "ReadOnly": Equal(true), - }), - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/output"), - "Name": Equal("shared-output"), - }), - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/metadata"), - "Name": Equal("shared-metadata"), - }), - ), - "Env": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("KRATIX_WORKFLOW_ACTION"), - "Value": Equal("delete"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("KRATIX_WORKFLOW_TYPE"), - "Value": Equal("resource"), - }), - ), - }), - }), - "InitContainers": MatchAllElementsWithIndex(IndexIdentity, Elements{ - "0": MatchFields(IgnoreExtras, Fields{ - "Name": Equal("reader"), - "Env": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_KIND"), - "Value": Equal("custom-namespace"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_GROUP"), - "Value": Equal("marketplace.kratix.io"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_NAME"), - "Value": Equal("example"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("OBJECT_NAMESPACE"), - "Value": Equal("default"), - }), - MatchFields(IgnoreExtras, Fields{ - "Name": Equal("KRATIX_WORKFLOW_TYPE"), - "Value": Equal("resource"), - }), - ), - "VolumeMounts": ConsistOf( - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/input"), - "Name": Equal("shared-input"), - }), - MatchFields(IgnoreExtras, Fields{ - "MountPath": Equal("/kratix/output"), - "Name": Equal("shared-output"), - }), - ), - "Command": ConsistOf( - Equal("sh"), - Equal("-c"), - Equal("reader"), - ), - }), - })}, - ), - }), - }), - })) - }) - - Context("the pipeline name would exceed the 63 character limit", func() { - BeforeEach(func() { - promise := promiseFromFile(promisePath) - resourceRequest := resourceRequestFromFile(resourceRequestPath) - resourceRequest.SetName("long-long-request") - - pipelines, err := promise.GeneratePipelines(logger) - Expect(err).ToNot(HaveOccurred()) - - pipelineResources = pipeline.NewDeleteResource( - resourceRequest, - pipelines.DeleteResource[0], - "long-long-request", - "long-long-promise", - "long-long-promises", - ) - }) - - It("truncates the pipeline name to ensure it fits the 63 character limit", func() { - job = *pipelineResources[3].(*batchv1.Job) - Expect(job.ObjectMeta.Name).To(HaveLen(62)) - Expect(job.ObjectMeta.Name).To(HavePrefix("kratix-long-long-promise-long-long-request-instance-dele-")) - }) - }) - }) - }) - - Describe("optional workflow configs", func() { - var ( - rr *unstructured.Unstructured - p v1alpha1.Pipeline - ) - - BeforeEach(func() { - rr = &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "test-pod", - "namespace": "test-namespace", - }, - "spec": map[string]interface{}{ - "foo": "bar", - 
}, - }, - } - - p = v1alpha1.Pipeline{ - Spec: v1alpha1.PipelineSpec{ - Containers: []v1alpha1.Container{ - {Name: "test-container", Image: "test-image"}, - }, - }, - } - }) - - It("can include args and commands", func() { - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - Args: []string{"arg1", "arg2"}, - Command: []string{"command1", "command2"}, - }) - resources := pipeline.NewDelete(rr, p, "", "test-promise", "promises") - job := resources[3].(*batchv1.Job) - - Expect(job.Spec.Template.Spec.InitContainers[1].Args).To(BeEmpty()) - Expect(job.Spec.Template.Spec.InitContainers[1].Command).To(BeEmpty()) - Expect(job.Spec.Template.Spec.Containers[0].Args).To(Equal([]string{"arg1", "arg2"})) - Expect(job.Spec.Template.Spec.Containers[0].Command).To(Equal([]string{"command1", "command2"})) - }) - - It("can include env and envFrom", func() { - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - Env: []corev1.EnvVar{ - {Name: "env1", Value: "value1"}, - }, - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: "test-configmap"}, - }, - }, - }, - }) - resources := pipeline.NewDelete(rr, p, "", "test-promise", "promises") - job := resources[3].(*batchv1.Job) - - Expect(job.Spec.Template.Spec.InitContainers[1].Env).To(ContainElements( - corev1.EnvVar{Name: "KRATIX_WORKFLOW_ACTION", Value: "delete"}, - )) - Expect(job.Spec.Template.Spec.Containers[0].Env).To(ContainElements( - corev1.EnvVar{Name: "KRATIX_WORKFLOW_ACTION", Value: "delete"}, - corev1.EnvVar{Name: "env1", Value: "value1"}, - )) - - Expect(job.Spec.Template.Spec.InitContainers[1].EnvFrom).To(BeNil()) - Expect(job.Spec.Template.Spec.Containers[0].EnvFrom).To(ContainElements( - corev1.EnvFromSource{ - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: "test-configmap"}, - }, - }, - )) - }) - - It("can include volume and volume mounts", func() { - p.Spec.Volumes = []corev1.Volume{ - {Name: "test-volume", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, - } - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - VolumeMounts: []corev1.VolumeMount{ - {Name: "test-volume-mount", MountPath: "/test-mount-path"}, - }, - }) - resources := pipeline.NewDelete(rr, p, "", "test-promise", "promises") - job := resources[3].(*batchv1.Job) - - Expect(job.Spec.Template.Spec.InitContainers[1].VolumeMounts).To(HaveLen(3), "default volume mounts should've been included") - Expect(job.Spec.Template.Spec.InitContainers[1].Command).To(BeEmpty()) - Expect(job.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement( - corev1.VolumeMount{Name: "test-volume-mount", MountPath: "/test-mount-path"}, - )) - Expect(job.Spec.Template.Spec.Volumes).To(ContainElement( - corev1.Volume{Name: "test-volume", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, - )) - }) - - It("can include imagePullPolicy and imagePullSecrets", func() { - p.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{Name: "test-secret"}, {Name: "another-secret"}} - p.Spec.Containers = append(p.Spec.Containers, v1alpha1.Container{ - Name: "another-container", - Image: "another-image", - ImagePullPolicy: corev1.PullAlways, - }) - resources := pipeline.NewDelete(rr, p, "", "test-promise", 
"promises") - job := resources[3].(*batchv1.Job) - - Expect(job.Spec.Template.Spec.ImagePullSecrets).To(HaveLen(2), "imagePullSecrets should've been included") - Expect(job.Spec.Template.Spec.ImagePullSecrets).To(ContainElements( - corev1.LocalObjectReference{Name: "test-secret"}, - corev1.LocalObjectReference{Name: "another-secret"}, - ), "imagePullSecrets should've been included") - Expect(job.Spec.Template.Spec.InitContainers[1].ImagePullPolicy).To(BeEmpty()) - Expect(job.Spec.Template.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways)) - }) - }) -}) - -func promiseFromFile(path string) *v1alpha1.Promise { - promiseBody, err := os.Open(path) - Expect(err).ToNot(HaveOccurred()) - - decoder := yaml.NewYAMLOrJSONDecoder(promiseBody, 2048) - promise := &v1alpha1.Promise{} - err = decoder.Decode(promise) - Expect(err).ToNot(HaveOccurred()) - promiseBody.Close() - - return promise -} - -func resourceRequestFromFile(path string) *unstructured.Unstructured { - body, err := os.Open(path) - Expect(err).ToNot(HaveOccurred()) - - decoder := yaml.NewYAMLOrJSONDecoder(body, 2048) - resourceRequest := &unstructured.Unstructured{} - err = decoder.Decode(resourceRequest) - Expect(err).ToNot(HaveOccurred()) - body.Close() - - return resourceRequest -} diff --git a/lib/pipeline/labels.go b/lib/pipeline/labels.go deleted file mode 100644 index f6ae2713..00000000 --- a/lib/pipeline/labels.go +++ /dev/null @@ -1,95 +0,0 @@ -package pipeline - -import ( - "github.com/syntasso/kratix/api/v1alpha1" -) - -type pipelineLabels map[string]string -type action string - -func newPipelineLabels() pipelineLabels { - return make(map[string]string) -} - -func LabelsForAllResourceWorkflows(rrID, promiseID string) map[string]string { - return ResourceLabels(rrID, "", promiseID). - WithWorkflow(v1alpha1.WorkflowTypeResource, "", "") -} - -func LabelsForAllPromiseWorkflows(promiseID string) map[string]string { - return PromiseLabels(promiseID). 
- WithWorkflow(v1alpha1.WorkflowTypePromise, "", "")
-}
-
-func LabelsForDeleteResource(rrID, rrName, promiseID, pipelineName string, requestSHA ...string) map[string]string {
- labels := ResourceLabels(rrID, rrName, promiseID).WithWorkflow(v1alpha1.WorkflowTypeResource, v1alpha1.WorkflowActionDelete, pipelineName)
- if len(requestSHA) > 0 {
- return labels.WithRequestSHA(requestSHA[0])
- }
- return labels
-}
-
-func LabelsForConfigureResource(rrID, rrName, promiseID, pipelineName string, requestSHA ...string) map[string]string {
- labels := ResourceLabels(rrID, rrName, promiseID).WithWorkflow(v1alpha1.WorkflowTypeResource, v1alpha1.WorkflowActionConfigure, pipelineName)
- if len(requestSHA) > 0 {
- return labels.WithRequestSHA(requestSHA[0])
- }
- return labels
-}
-
-func LabelsForDeletePromise(promiseID, pipelineName string, requestSHA ...string) map[string]string {
- labels := PromiseLabels(promiseID).WithWorkflow(v1alpha1.WorkflowTypePromise, v1alpha1.WorkflowActionDelete, pipelineName)
- if len(requestSHA) > 0 {
- return labels.WithRequestSHA(requestSHA[0])
- }
- return labels
-}
-
-func LabelsForConfigurePromise(promiseID, pipelineName string, requestSHA ...string) map[string]string {
- labels := PromiseLabels(promiseID).WithWorkflow(v1alpha1.WorkflowTypePromise, v1alpha1.WorkflowActionConfigure, pipelineName)
- if len(requestSHA) > 0 {
- return labels.WithRequestSHA(requestSHA[0])
- }
- return labels
-}
-
-func ResourceLabels(rrID, rrName, promiseID string) pipelineLabels {
- return PromiseLabels(promiseID).WithResourceRequestID(rrID, rrName)
-}
-
-func PromiseLabels(promiseID string) pipelineLabels {
- return newPipelineLabels().WithPromiseID(promiseID)
-}
-
-func (p pipelineLabels) WithPromiseID(promiseID string) pipelineLabels {
- p[v1alpha1.PromiseNameLabel] = promiseID
- return p
-}
-
-func (p pipelineLabels) WithResourceRequestID(resourceRequestID, resourceRequestName string) pipelineLabels {
- p["kratix-promise-resource-request-id"] = resourceRequestID
- if resourceRequestName != "" {
- p[v1alpha1.ResourceNameLabel] = resourceRequestName
- }
- return p
-}
-
-func (p pipelineLabels) WithWorkflow(workflowType v1alpha1.Type, workflowAction v1alpha1.Action, pipelineName string) pipelineLabels {
- p["kratix-workflow-kind"] = "pipeline.platform.kratix.io"
- p["kratix-workflow-promise-version"] = "v1alpha1"
- p["kratix-workflow-type"] = string(workflowType)
- p[v1alpha1.WorkTypeLabel] = string(workflowType)
- if workflowAction != "" {
- p["kratix-workflow-action"] = string(workflowAction)
- }
- if pipelineName != "" {
- p["kratix-workflow-pipeline-name"] = pipelineName
- p[v1alpha1.PipelineNameLabel] = pipelineName
- }
- return p
-}
-
-func (p pipelineLabels) WithRequestSHA(requestSHA string) pipelineLabels {
- p[v1alpha1.KratixResourceHashLabel] = requestSHA
- return p
-}
diff --git a/lib/pipeline/shared.go b/lib/pipeline/shared.go
deleted file mode 100644
index f50a9aaf..00000000
--- a/lib/pipeline/shared.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package pipeline
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/pkg/errors"
- "github.com/syntasso/kratix/api/v1alpha1"
- "github.com/syntasso/kratix/lib/resourceutil"
- "gopkg.in/yaml.v2"
- v1 "k8s.io/api/core/v1"
- rbacv1 "k8s.io/api/rbac/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-)
-
-const (
- kratixActionEnvVar = "KRATIX_WORKFLOW_ACTION"
- kratixTypeEnvVar = "KRATIX_WORKFLOW_TYPE"
- kratixPromiseEnvVar = "KRATIX_PROMISE_NAME"
-)
-
-func defaultPipelineVolumes() ([]v1.Volume, []v1.VolumeMount) {
- volumes := []v1.Volume{
- {Name: "shared-input", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
- {Name: "shared-output", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
- {Name: "shared-metadata", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
- }
- volumeMounts := []v1.VolumeMount{
- {MountPath: "/kratix/input", Name: "shared-input", ReadOnly: true},
- {MountPath: "/kratix/output", Name: "shared-output"},
- {MountPath: "/kratix/metadata", Name: "shared-metadata"},
- }
- return volumes, volumeMounts
-}
-
-func serviceAccount(args PipelineArgs) *v1.ServiceAccount {
- return &v1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Name: args.ServiceAccountName(),
- Namespace: args.Namespace(),
- Labels: args.Labels(),
- },
- }
-}
-
-func role(obj *unstructured.Unstructured, objPluralName string, args PipelineArgs) *rbacv1.Role {
- return &rbacv1.Role{
- ObjectMeta: metav1.ObjectMeta{
- Name: args.RoleName(),
- Labels: args.Labels(),
- Namespace: args.Namespace(),
- },
- Rules: []rbacv1.PolicyRule{
- {
- APIGroups: []string{obj.GroupVersionKind().Group},
- Resources: []string{objPluralName, objPluralName + "/status"},
- Verbs: []string{"get", "list", "update", "create", "patch"},
- },
- {
- APIGroups: []string{"platform.kratix.io"},
- Resources: []string{"works"},
- Verbs: []string{"*"},
- },
- },
- }
-}
-
-func roleBinding(args PipelineArgs) *rbacv1.RoleBinding {
- return &rbacv1.RoleBinding{
- ObjectMeta: metav1.ObjectMeta{
- Name: args.RoleBindingName(),
- Labels: args.Labels(),
- Namespace: args.Namespace(),
- },
- RoleRef: rbacv1.RoleRef{
- Kind: "Role",
- APIGroup: "rbac.authorization.k8s.io",
- Name: args.RoleName(),
- },
- Subjects: []rbacv1.Subject{
- {
- Kind: "ServiceAccount",
- Namespace: args.Namespace(),
- Name: args.ServiceAccountName(),
- },
- },
- }
-}
-
-func clusterRole(args PipelineArgs) *rbacv1.ClusterRole {
- return &rbacv1.ClusterRole{
- ObjectMeta: metav1.ObjectMeta{
- Name: args.RoleName(),
- Labels: args.Labels(),
- },
- Rules: []rbacv1.PolicyRule{
- {
- APIGroups: []string{"platform.kratix.io"},
- Resources: []string{v1alpha1.PromisePlural, v1alpha1.PromisePlural + "/status", "works"},
- Verbs: []string{"get", "list", "update", "create", "patch"},
- },
- },
- }
-}
-
-func clusterRoleBinding(args PipelineArgs) *rbacv1.ClusterRoleBinding {
- return &rbacv1.ClusterRoleBinding{
- ObjectMeta: metav1.ObjectMeta{
- Name: args.RoleBindingName(),
- Labels: args.Labels(),
- },
- RoleRef: rbacv1.RoleRef{
- Kind: "ClusterRole",
- APIGroup: "rbac.authorization.k8s.io",
- Name: args.RoleName(),
- },
- Subjects: []rbacv1.Subject{
- {
- Kind: "ServiceAccount",
- Namespace: args.Namespace(),
- Name: args.ServiceAccountName(),
- },
- },
- }
-}
-
-func destinationSelectorsConfigMap(resources PipelineArgs, destinationSelectors []v1alpha1.PromiseScheduling, promiseWorkflowSelectors *v1alpha1.WorkloadGroupScheduling) (*v1.ConfigMap, error) {
- workloadGroupScheduling := []v1alpha1.WorkloadGroupScheduling{}
- for _, scheduling := range destinationSelectors {
- workloadGroupScheduling = append(workloadGroupScheduling, v1alpha1.WorkloadGroupScheduling{
- MatchLabels: scheduling.MatchLabels,
- Source: "promise",
- })
- }
-
- if promiseWorkflowSelectors != nil {
- workloadGroupScheduling = append(workloadGroupScheduling, *promiseWorkflowSelectors)
- }
-
- schedulingYAML, err := yaml.Marshal(workloadGroupScheduling)
- if err != nil {
- return nil, errors.Wrap(err, "error marshalling destinationSelectors to yaml")
- }
-
- return &v1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: resources.ConfigMapName(),
- Namespace: resources.Namespace(),
- Labels: resources.Labels(),
- },
- Data: map[string]string{
- "destinationSelectors": string(schedulingYAML),
- },
- }, nil
-}
-
-func readerContainer(obj *unstructured.Unstructured, kratixWorkflowType v1alpha1.Type, volumeName string) v1.Container {
- namespace := obj.GetNamespace()
- if namespace == "" {
- // if namespace is empty it means its a unnamespaced resource, so providing
- // any value is valid for kubectl
- namespace = v1alpha1.SystemNamespace
- }
-
- readerContainer := v1.Container{
- Name: "reader",
- Image: os.Getenv("WC_IMG"),
- Env: []v1.EnvVar{
- {Name: "OBJECT_KIND", Value: strings.ToLower(obj.GetKind())},
- {Name: "OBJECT_GROUP", Value: obj.GroupVersionKind().Group},
- {Name: "OBJECT_NAME", Value: obj.GetName()},
- {Name: "OBJECT_NAMESPACE", Value: namespace},
- {Name: "KRATIX_WORKFLOW_TYPE", Value: string(kratixWorkflowType)},
- },
- VolumeMounts: []v1.VolumeMount{
- {MountPath: "/kratix/input", Name: "shared-input"},
- {MountPath: "/kratix/output", Name: "shared-output"},
- },
- Command: []string{"sh", "-c", "reader"},
- }
- return readerContainer
-}
-
-func generateContainersAndVolumes(obj *unstructured.Unstructured, workflowType v1alpha1.Type, pipeline v1alpha1.Pipeline, kratixEnvVars []v1.EnvVar) ([]v1.Container, []v1.Volume) {
- volumes, defaultVolumeMounts := defaultPipelineVolumes()
-
- readerContainer := readerContainer(obj, workflowType, "shared-input")
- containers := []v1.Container{
- readerContainer,
- }
-
- if len(pipeline.Spec.Volumes) > 0 {
- volumes = append(volumes, pipeline.Spec.Volumes...)
- }
-
- for _, c := range pipeline.Spec.Containers {
- containerVolumeMounts := append(defaultVolumeMounts, c.VolumeMounts...)
-
- containers = append(containers, v1.Container{
- Name: c.Name,
- Image: c.Image,
- VolumeMounts: containerVolumeMounts,
- Args: c.Args,
- Command: c.Command,
- Env: append(kratixEnvVars, c.Env...),
- EnvFrom: c.EnvFrom,
- ImagePullPolicy: c.ImagePullPolicy,
- })
- }
-
- return containers, volumes
-}
-
-func pipelineName(promiseIdentifier, resourceIdentifier, objectName, pipelineName string) string {
- var promiseResource = promiseIdentifier
- if resourceIdentifier != "" {
- promiseResource = fmt.Sprintf("%s-%s", promiseIdentifier, objectName)
- }
-
- pipelineIdentifier := fmt.Sprintf("kratix-%s-%s", promiseResource, pipelineName)
-
- return resourceutil.GenerateObjectName(pipelineIdentifier)
-}
diff --git a/lib/workflow/reconciler.go b/lib/workflow/reconciler.go
index 08872713..d655b0f7 100644
--- a/lib/workflow/reconciler.go
+++ b/lib/workflow/reconciler.go
@@ -23,25 +23,18 @@ type Opts struct {
 logger logr.Logger
 parentObject *unstructured.Unstructured //TODO make this field private too? or everything public and no constructor func
- Pipelines []Pipeline
+ Resources []v1alpha1.PipelineJobResources
 source string
 }
-type Pipeline struct {
- Name string
- Job *batchv1.Job
- // ServiceAccount, Role, Rolebinding, ConfigMap etc (differs for delete vs configure)
- JobRequiredResources []client.Object
-}
-
-func NewOpts(ctx context.Context, client client.Client, logger logr.Logger, parentObj *unstructured.Unstructured, pipelines []Pipeline, source string) Opts {
+func NewOpts(ctx context.Context, client client.Client, logger logr.Logger, parentObj *unstructured.Unstructured, resources []v1alpha1.PipelineJobResources, source string) Opts {
 return Opts{
 ctx: ctx,
 client: client,
 logger: logger,
 parentObject: parentObj,
 source: source,
- Pipelines: pipelines,
+ Resources: resources,
 }
 }
@@ -49,15 +42,15 @@ func NewOpts(ctx context.Context, client client.Client, logger logr.Logger, pare
 func ReconcileDelete(opts Opts) (bool, error) {
 opts.logger.Info("Reconciling Delete Pipeline")
- if len(opts.Pipelines) == 0 {
+ if len(opts.Resources) == 0 {
 return false, nil
 }
- if len(opts.Pipelines) > 1 {
+ if len(opts.Resources) > 1 {
 opts.logger.Info("Multiple delete pipeline found but only one delete pipeline is currently supported. Ignoring all but the first")
 }
- pipeline := opts.Pipelines[0]
+ pipeline := opts.Resources[0]
 existingDeletePipeline, err := getDeletePipeline(opts, opts.parentObject.GetNamespace(), pipeline)
 if err != nil {
 return false, err
@@ -67,7 +60,7 @@ func ReconcileDelete(opts Opts) (bool, error) {
 opts.logger.Info("Creating Delete Pipeline. The pipeline will now execute...")
 //TODO retrieve error information from applyResources to return to the caller
- applyResources(opts, append(pipeline.JobRequiredResources, pipeline.Job)...)
+ applyResources(opts, append(pipeline.RequiredResources, pipeline.Job)...)
 return true, nil
 }
@@ -105,8 +98,8 @@ func ReconcileConfigure(opts Opts) (bool, error) {
 pipelineIndex = nextPipelineIndex(opts, mostRecentJob)
 }
- if pipelineIndex >= len(opts.Pipelines) {
- pipelineIndex = len(opts.Pipelines) - 1
+ if pipelineIndex >= len(opts.Resources) {
+ pipelineIndex = len(opts.Resources) - 1
 }
 if pipelineIndex < 0 {
@@ -121,7 +114,7 @@ func ReconcileConfigure(opts Opts) (bool, error) {
 opts.logger.Info("Reconciling Configure workflow", "pipelineIndex", pipelineIndex, "mostRecentJob", mostRecentJobName)
- pipeline := opts.Pipelines[pipelineIndex]
+ pipeline := opts.Resources[pipelineIndex]
 opts.logger = originalLogger.WithName(pipeline.Name)
 if jobIsForPipeline(pipeline, mostRecentJob) {
@@ -168,7 +161,7 @@ func ReconcileConfigure(opts Opts) (bool, error) {
 return createConfigurePipeline(opts, pipelineIndex, pipeline)
 }
-func getLabelsForPipelineJob(pipeline Pipeline) map[string]string {
+func getLabelsForPipelineJob(pipeline v1alpha1.PipelineJobResources) map[string]string {
 labels := pipeline.Job.DeepCopy().GetLabels()
 return labels
 }
@@ -186,7 +179,7 @@ func labelsForJobs(opts Opts) map[string]string {
 return l
 }
-func labelsForAllPipelineJobs(pipeline Pipeline) map[string]string {
+func labelsForAllPipelineJobs(pipeline v1alpha1.PipelineJobResources) map[string]string {
 pipelineLabels := pipeline.Job.GetLabels()
 labels := map[string]string{
 v1alpha1.PromiseNameLabel: pipelineLabels[v1alpha1.PromiseNameLabel],
@@ -197,7 +190,7 @@ func labelsForAllPipelineJobs(pipeline Pipeline) map[string]string {
 return labels
 }
-func jobIsForPipeline(pipeline Pipeline, job *batchv1.Job) bool {
+func jobIsForPipeline(pipeline v1alpha1.PipelineJobResources, job *batchv1.Job) bool {
 if job == nil {
 return false
 }
@@ -218,10 +211,10 @@ func nextPipelineIndex(opts Opts, mostRecentJob *batchv1.Job) int {
 return 0
 }
- i := len(opts.Pipelines) - 1
+ i := len(opts.Resources) - 1
 for i >= 0 {
- if jobIsForPipeline(opts.Pipelines[i], mostRecentJob) {
- opts.logger.Info("Found job for pipeline", "pipeline", opts.Pipelines[i].Name, "index", i)
+ if jobIsForPipeline(opts.Resources[i], mostRecentJob) {
+ opts.logger.Info("Found job for pipeline", "pipeline", opts.Resources[i].Name, "index", i)
 if isFailed(mostRecentJob) || isRunning(mostRecentJob) {
 return i
 }
@@ -266,7 +259,7 @@ func isRunning(job *batchv1.Job) bool {
 func cleanup(opts Opts, namespace string) error {
 if opts.source == "promise" {
- for _, pipeline := range opts.Pipelines {
+ for _, pipeline := range opts.Resources {
 if err := deleteConfigMap(opts, pipeline); err != nil {
 return err
 }
@@ -274,7 +267,7 @@ func cleanup(opts Opts, namespace string) error {
 }
 pipelineNames := map[string]bool{}
- for _, pipeline := range opts.Pipelines {
+ for _, pipeline := range opts.Resources {
 l := labelsForAllPipelineJobs(pipeline)
 l[v1alpha1.PipelineNameLabel] = pipeline.Name
 pipelineNames[pipeline.Name] = true
@@ -331,9 +324,9 @@ func deleteAllButLastFiveJobs(opts Opts, pipelineJobsAtCurrentSpec []batchv1.Job
 return nil
 }
-func deleteConfigMap(opts Opts, pipeline Pipeline) error {
+func deleteConfigMap(opts Opts, pipeline v1alpha1.PipelineJobResources) error {
 configMap := &v1.ConfigMap{}
- for _, resource := range pipeline.JobRequiredResources {
+ for _, resource := range pipeline.RequiredResources {
 if _, ok := resource.(*v1.ConfigMap); ok {
 configMap = resource.(*v1.ConfigMap)
 break
@@ -351,7 +344,7 @@ func deleteConfigMap(opts Opts, pipeline Pipeline) error {
 return nil
 }
-func createConfigurePipeline(opts Opts, pipelineIndex int, pipeline Pipeline) (bool, error) {
+func createConfigurePipeline(opts Opts, pipelineIndex int, pipeline v1alpha1.PipelineJobResources) (bool, error) {
 updated, err := setPipelineCompletedConditionStatus(opts, pipelineIndex == 0, opts.parentObject)
 if err != nil || updated {
 return updated, err
@@ -359,7 +352,7 @@ func createConfigurePipeline(opts Opts, pipelineIndex int, pipeline Pipeline) (b
 opts.logger.Info("Triggering Promise pipeline")
- applyResources(opts, append(pipeline.JobRequiredResources, pipeline.Job)...)
+ applyResources(opts, append(pipeline.RequiredResources, pipeline.Job)...)
 opts.logger.Info("Parent object:", "parent", opts.parentObject.GetName())
 if isManualReconciliation(opts.parentObject.GetLabels()) {
@@ -403,7 +396,7 @@ func setPipelineCompletedConditionStatus(opts Opts, isTheFirstPipeline bool, obj
 return false, nil
 }
-func getDeletePipeline(opts Opts, namespace string, pipeline Pipeline) (*batchv1.Job, error) {
+func getDeletePipeline(opts Opts, namespace string, pipeline v1alpha1.PipelineJobResources) (*batchv1.Job, error) {
 labels := getLabelsForPipelineJob(pipeline)
 jobs, err := getJobsWithLabels(opts, labels, namespace)
 if err != nil || len(jobs) == 0 {
diff --git a/lib/workflow/reconciler_test.go b/lib/workflow/reconciler_test.go
index 70fdfbb3..438ec156 100644
--- a/lib/workflow/reconciler_test.go
+++ b/lib/workflow/reconciler_test.go
@@ -8,7 +8,6 @@ import (
 . "github.com/onsi/ginkgo/v2"
 . "github.com/onsi/gomega"
 "github.com/syntasso/kratix/api/v1alpha1"
- "github.com/syntasso/kratix/lib/pipeline"
 "github.com/syntasso/kratix/lib/resourceutil"
 "github.com/syntasso/kratix/lib/workflow"
 batchv1 "k8s.io/api/batch/v1"
@@ -25,7 +24,7 @@ var namespace = "kratix-platform-system"
 var _ = Describe("Workflow Reconciler", func() {
 var promise v1alpha1.Promise
- var workflowPipelines []workflow.Pipeline
+ var workflowPipelines []v1alpha1.PipelineJobResources
 var uPromise *unstructured.Unstructured
 var pipelines []v1alpha1.Pipeline
@@ -160,7 +159,7 @@ var _ = Describe("Workflow Reconciler", func() {
 })
 When("the parent is later manually reconciled", func() {
- var newWorkflowPipelines []workflow.Pipeline
+ var newWorkflowPipelines []v1alpha1.PipelineJobResources
 BeforeEach(func() {
 labelPromiseForManualReconciliation("redis")
@@ -173,14 +172,14 @@ var _ = Describe("Workflow Reconciler", func() {
 Expect(err).NotTo(HaveOccurred())
 jobList := listJobs(namespace)
 Expect(jobList).To(HaveLen(2))
- Expect(findByName(jobList, newWorkflowPipelines[0].Job.Name)).To(BeTrue())
+ Expect(findByName(jobList, newWorkflowPipelines[0].Job.GetName())).To(BeTrue())
 })
 })
 })
 })
 When("the promise spec is updated", func() {
- var updatedWorkflowPipeline []workflow.Pipeline
+ var updatedWorkflowPipeline []v1alpha1.PipelineJobResources
 BeforeEach(func() {
 Expect(fakeK8sClient.Get(ctx, types.NamespacedName{Name: "redis"}, &promise)).To(Succeed())
@@ -209,7 +208,7 @@ var _ = Describe("Workflow Reconciler", func() {
 })
 When("there are jobs for the promise at this spec", func() {
- var originalWorkflowPipelines []workflow.Pipeline
+ var originalWorkflowPipelines []v1alpha1.PipelineJobResources
 BeforeEach(func() {
 // Run the original pipeline jobs to completion, so they exist in the
@@ -380,7 +379,7 @@ var _ = Describe("Workflow Reconciler", func() {
 })
 When("all pipelines have executed", func() {
- var updatedWorkflows []workflow.Pipeline
+ var updatedWorkflows []v1alpha1.PipelineJobResources
 BeforeEach(func() {
 Expect(fakeK8sClient.Create(ctx, workflowPipelines[0].Job)).To(Succeed())
@@ -618,7 +617,7 @@ var _ = Describe("Workflow Reconciler", func() {
 Describe("ReconcileDelete", func() {
 When("there are no pipelines to reconcile", func() {
 It("considers the workflow as completed", func() {
- opts := workflow.NewOpts(ctx, fakeK8sClient, logger, nil, []workflow.Pipeline{}, "promise")
+ opts := workflow.NewOpts(ctx, fakeK8sClient, logger, nil, []v1alpha1.PipelineJobResources{}, "promise")
 requeue, err := workflow.ReconcileDelete(opts)
 Expect(err).NotTo(HaveOccurred())
 Expect(requeue).To(BeFalse())
@@ -686,7 +685,7 @@ func createStaticDependencyWork(promiseName string) {
 Expect(fakeK8sClient.Create(ctx, &work)).To(Succeed())
 }
-func setupTest(promise v1alpha1.Promise, pipelines []v1alpha1.Pipeline) ([]workflow.Pipeline, *unstructured.Unstructured) {
+func setupTest(promise v1alpha1.Promise, pipelines []v1alpha1.Pipeline) ([]v1alpha1.PipelineJobResources, *unstructured.Unstructured) {
 var err error
 p := v1alpha1.Promise{}
 Expect(fakeK8sClient.Get(ctx, types.NamespacedName{Name: promise.GetName()}, &p)).To(Succeed())
@@ -699,21 +698,19 @@ func setupTest(promise v1alpha1.Promise, pipelines []v1alpha1.Pipeline) ([]workf
 jobs := []*batchv1.Job{}
 otherResources := [][]client.Object{}
 for _, p := range pipelines {
- generatedResources, err := pipeline.NewConfigurePromise(
- uPromise, p, promise.Name, nil, logger,
- )
+ generatedResources, err := p.ForPromise(&promise, v1alpha1.WorkflowActionConfigure).Resources(nil)
 Expect(err).NotTo(HaveOccurred())
- jobs = append(jobs, generatedResources[4].(*batchv1.Job))
- otherResources = append(otherResources, generatedResources[0:4])
+ jobs = append(jobs, generatedResources.Job)
+ otherResources = append(otherResources, generatedResources.RequiredResources)
 }
- workflowPipelines := []workflow.Pipeline{}
+ workflowPipelines := []v1alpha1.PipelineJobResources{}
 for i, j := range jobs {
 j.SetCreationTimestamp(nextTimestamp())
- workflowPipelines = append(workflowPipelines, workflow.Pipeline{
- JobRequiredResources: otherResources[i],
- Job: j,
- Name: j.GetLabels()["kratix.io/pipeline-name"],
+ workflowPipelines = append(workflowPipelines, v1alpha1.PipelineJobResources{
+ Name: j.GetLabels()["kratix.io/pipeline-name"],
+ Job: j,
+ RequiredResources: otherResources[i],
 })
 }
@@ -751,9 +748,10 @@ func markJobAs(conditionType batchv1.JobConditionType, name string) {
 },
 }
- if conditionType == batchv1.JobComplete {
+ switch conditionType {
+ case batchv1.JobComplete:
 job.Status.Succeeded = 1
- } else if conditionType == batchv1.JobFailed {
+ case batchv1.JobFailed:
 job.Status.Failed = 1
 }
diff --git a/work-creator/pipeline/work_creator.go b/work-creator/pipeline/work_creator.go
index be7c121c..1d68959f 100644
--- a/work-creator/pipeline/work_creator.go
+++ b/work-creator/pipeline/work_creator.go
@@ -4,6 +4,7 @@ import (
 "context"
 "crypto/md5"
 "fmt"
+ "github.com/syntasso/kratix/lib/objectutil"
 "io"
 "os"
 "path/filepath"
@@ -142,7 +143,7 @@ func (w *WorkCreator) Execute(rootDirectory, promiseName, namespace, resourceNam
 work := &v1alpha1.Work{}
- work.Name = resourceutil.GenerateObjectName(identifier)
+ work.Name = objectutil.GenerateObjectName(identifier)
 work.Namespace = namespace
 work.Spec.WorkloadGroups = workloadGroups
 work.Spec.PromiseName = promiseName