From 836f32dd56ddccbf1db3b70d67cbb8400c010abf Mon Sep 17 00:00:00 2001
From: Simon Murray
Date: Wed, 7 Aug 2024 15:58:26 +0100
Subject: [PATCH] Use Better Provisioner Propagation

This propagates remote clusters and background deletion flags to the
application provisioners a lot more transparently via the request context.
---
 go.mod                                        |  2 +-
 go.sum                                        |  4 +--
 .../helmapplications/clusteropenstack/hack.go |  7 +++-
 .../clusteropenstack/remotecluster.go         |  7 +++-
 .../openstackplugincindercsi/provisioner.go   |  5 ++-
 .../helmapplications/vcluster/config.go       |  7 +++-
 .../managers/cluster/provisioner.go           | 33 +++++++------------
 .../managers/clustermanager/provisioner.go    | 15 +++++----
 pkg/server/handler/cluster/client.go          |  6 +++-
 9 files changed, 51 insertions(+), 35 deletions(-)

diff --git a/go.mod b/go.mod
index 2295ef59..21f1fea2 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
 	github.com/prometheus/client_golang v1.19.1
 	github.com/spdx/tools-golang v0.5.5
 	github.com/spf13/pflag v1.0.5
-	github.com/unikorn-cloud/core v0.1.63
+	github.com/unikorn-cloud/core v0.1.64
 	github.com/unikorn-cloud/identity v0.2.29
 	github.com/unikorn-cloud/region v0.1.30
 	go.opentelemetry.io/otel v1.28.0
diff --git a/go.sum b/go.sum
index aa1257f8..630ce64e 100644
--- a/go.sum
+++ b/go.sum
@@ -181,8 +181,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
 github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
-github.com/unikorn-cloud/core v0.1.63 h1:Jl/xuoGRKESMXhS1+apcaS/1I776agTyT75BGz9AKBA=
-github.com/unikorn-cloud/core v0.1.63/go.mod h1:JcUIQW3+oiZPUQmOlENw3OCi35IBxPKa+J4MbP3TO7k=
+github.com/unikorn-cloud/core v0.1.64 h1:4GqACg1YOmUrDo5euXz0yi753Mzx1TBg7fioRKysb1g=
+github.com/unikorn-cloud/core v0.1.64/go.mod h1:JcUIQW3+oiZPUQmOlENw3OCi35IBxPKa+J4MbP3TO7k=
 github.com/unikorn-cloud/identity v0.2.29 h1:kKEJmh6tjjdvZWYdZhyRewG3aHf9wmWwG5C/kb+Rm9A=
 github.com/unikorn-cloud/identity v0.2.29/go.mod h1:ujrL+6kRUrPIk4Z0Yc12A+FDy6L4b2Hgzz6oGZlKfGI=
 github.com/unikorn-cloud/region v0.1.30 h1:r5fTsmU9ub1VpYk3cZBUFZjNDaYW8OOtIGPbvxbN++s=
diff --git a/pkg/provisioners/helmapplications/clusteropenstack/hack.go b/pkg/provisioners/helmapplications/clusteropenstack/hack.go
index 24394da3..dfa30bcb 100644
--- a/pkg/provisioners/helmapplications/clusteropenstack/hack.go
+++ b/pkg/provisioners/helmapplications/clusteropenstack/hack.go
@@ -221,7 +221,12 @@ func (p *Provisioner) deleteOrphanedMachineDeployments(ctx context.Context) erro
 	//nolint:forcetypeassert
 	cluster := application.FromContext(ctx).(*unikornv1.KubernetesCluster)
 
-	client := coreclient.DynamicClientFromContext(ctx)
+	clusterContext, err := coreclient.ClusterFromContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	client := clusterContext.Client
 
 	deployments, err := p.getMachineDeployments(ctx, client)
 	if err != nil {
diff --git a/pkg/provisioners/helmapplications/clusteropenstack/remotecluster.go b/pkg/provisioners/helmapplications/clusteropenstack/remotecluster.go
index ec0a4fb6..ab752f61 100644
--- a/pkg/provisioners/helmapplications/clusteropenstack/remotecluster.go
+++ b/pkg/provisioners/helmapplications/clusteropenstack/remotecluster.go
@@ -112,8 +112,13 @@ func (g *RemoteCluster) Config(ctx context.Context) (*clientcmdapi.Config, error
 		Name: KubeconfigSecretName(g.cluster),
 	}
 
+	clusterContext, err := coreclient.ClusterFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+
 	// Retry getting the secret until it exists.
-	if err := coreclient.DynamicClientFromContext(ctx).Get(ctx, secretKey, secret); err != nil {
+	if err := clusterContext.Client.Get(ctx, secretKey, secret); err != nil {
 		if errors.IsNotFound(err) {
 			log.Info("kubernetes cluster kubeconfig does not exist, yielding")
 
diff --git a/pkg/provisioners/helmapplications/openstackplugincindercsi/provisioner.go b/pkg/provisioners/helmapplications/openstackplugincindercsi/provisioner.go
index ceba6357..78817549 100644
--- a/pkg/provisioners/helmapplications/openstackplugincindercsi/provisioner.go
+++ b/pkg/provisioners/helmapplications/openstackplugincindercsi/provisioner.go
@@ -85,7 +85,10 @@ func (p *Provisioner) Values(ctx context.Context, version *string) (interface{},
 	//nolint:forcetypeassert
 	cluster := application.FromContext(ctx).(*unikornv1.KubernetesCluster)
 
-	client := coreclient.DynamicClientFromContext(ctx)
+	client, err := coreclient.ProvisionerClientFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
 
 	storageClasses := p.generateStorageClasses(cluster)
 
diff --git a/pkg/provisioners/helmapplications/vcluster/config.go b/pkg/provisioners/helmapplications/vcluster/config.go
index f67385b7..c996fe05 100644
--- a/pkg/provisioners/helmapplications/vcluster/config.go
+++ b/pkg/provisioners/helmapplications/vcluster/config.go
@@ -99,8 +99,13 @@ func (c *ControllerRuntimeClient) RESTConfig(ctx context.Context, namespace, nam
 func (c *ControllerRuntimeClient) GetSecret(ctx context.Context, namespace, name string) (*corev1.Secret, error) {
 	log := log.FromContext(ctx)
 
+	clusterContext, err := coreclient.ClusterFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+
 	secret := &corev1.Secret{}
-	if err := coreclient.DynamicClientFromContext(ctx).Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, secret); err != nil {
+	if err := clusterContext.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, secret); err != nil {
 		if kerrors.IsNotFound(err) {
 			log.Info("vitual cluster kubeconfig does not exist, yielding")
 
diff --git a/pkg/provisioners/managers/cluster/provisioner.go b/pkg/provisioners/managers/cluster/provisioner.go
index 11f433c1..545b56be 100644
--- a/pkg/provisioners/managers/cluster/provisioner.go
+++ b/pkg/provisioners/managers/cluster/provisioner.go
@@ -31,7 +31,6 @@ import (
 	"github.com/unikorn-cloud/core/pkg/provisioners/conditional"
 	"github.com/unikorn-cloud/core/pkg/provisioners/remotecluster"
 	"github.com/unikorn-cloud/core/pkg/provisioners/serial"
-	provisionersutil "github.com/unikorn-cloud/core/pkg/provisioners/util"
 	"github.com/unikorn-cloud/core/pkg/util"
 	unikornv1 "github.com/unikorn-cloud/kubernetes/pkg/apis/unikorn/v1alpha1"
 	"github.com/unikorn-cloud/kubernetes/pkg/provisioners/helmapplications/cilium"
@@ -44,8 +43,6 @@ import (
 	"github.com/unikorn-cloud/kubernetes/pkg/provisioners/helmapplications/openstackplugincindercsi"
 	"github.com/unikorn-cloud/kubernetes/pkg/provisioners/helmapplications/vcluster"
 
-	"k8s.io/apimachinery/pkg/labels"
-
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -67,7 +64,10 @@ func newApplicationReferenceGetter(cluster *unikornv1.KubernetesCluster) *Applic
 
 func (a *ApplicationReferenceGetter) getApplication(ctx context.Context, name string) (*unikornv1core.ApplicationReference, error) {
 	// TODO: we could cache this, it's from a cache anyway, so quite cheap...
-	cli := coreclient.StaticClientFromContext(ctx)
+	cli, err := coreclient.ProvisionerClientFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
 
 	key := client.ObjectKey{
 		Name: *a.cluster.Spec.ApplicationBundle,
@@ -136,25 +136,19 @@ func (p *Provisioner) Object() unikornv1core.ManagableResourceInterface {
 
 // getClusterManager gets the control plane object that owns this cluster.
 func (p *Provisioner) getClusterManager(ctx context.Context) (*unikornv1.ClusterManager, error) {
-	// TODO: error checking.
-	projectLabels := labels.Set{
-		constants.KindLabel: constants.KindLabelValueProject,
-		constants.ProjectLabel: p.cluster.Labels[constants.ProjectLabel],
-	}
-
-	projectNamespace, err := provisionersutil.GetResourceNamespace(ctx, projectLabels)
+	cli, err := coreclient.ProvisionerClientFromContext(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	var clusterManager unikornv1.ClusterManager
-
 	key := client.ObjectKey{
-		Namespace: projectNamespace.Name,
+		Namespace: p.cluster.Namespace,
 		Name: p.cluster.Spec.ClusterManagerID,
 	}
 
-	if err := coreclient.StaticClientFromContext(ctx).Get(ctx, key, &clusterManager); err != nil {
+	var clusterManager unikornv1.ClusterManager
+
+	if err := cli.Get(ctx, key, &clusterManager); err != nil {
 		return nil, fmt.Errorf("%w: %s", ErrClusterManager, err.Error())
 	}
 
@@ -228,15 +222,12 @@ func (p *Provisioner) getProvisioner(ctx context.Context) (provisioners.Provisio
 // out, so we essentially have to defer cluster creation until we know the
 // manager is working and Argo isn't going to fail.
 func (p *Provisioner) managerReady(ctx context.Context) error {
-	cli := coreclient.StaticClientFromContext(ctx)
-
-	manager := &unikornv1.ClusterManager{}
-
-	if err := cli.Get(ctx, client.ObjectKey{Namespace: p.cluster.Namespace, Name: p.cluster.Spec.ClusterManagerID}, manager); err != nil {
+	clusterManager, err := p.getClusterManager(ctx)
+	if err != nil {
 		return err
 	}
 
-	condition, err := manager.StatusConditionRead(unikornv1core.ConditionAvailable)
+	condition, err := clusterManager.StatusConditionRead(unikornv1core.ConditionAvailable)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/provisioners/managers/clustermanager/provisioner.go b/pkg/provisioners/managers/clustermanager/provisioner.go
index ae7e3a7f..3a7dd617 100644
--- a/pkg/provisioners/managers/clustermanager/provisioner.go
+++ b/pkg/provisioners/managers/clustermanager/provisioner.go
@@ -67,7 +67,10 @@ func newApplicationReferenceGetter(clusterManager *unikornv1.ClusterManager) *Ap
 }
 
 func (a *ApplicationReferenceGetter) getApplication(ctx context.Context, name string) (*unikornv1core.ApplicationReference, error) {
-	cli := coreclient.StaticClientFromContext(ctx)
+	cli, err := coreclient.ProvisionerClientFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
 
 	key := client.ObjectKey{
 		Name: *a.clusterManager.Spec.ApplicationBundle,
@@ -128,14 +131,11 @@ func (p *Provisioner) getClusterManagerProvisioner() provisioners.Provisioner {
 		clusterapi.New(apps.clusterAPI),
 	)
 
-	// Set up deletion semantics.
-	clusterAPIProvisioner.BackgroundDeletion()
-
 	// Provision the vitual cluster, setup the remote cluster then
 	// install cert manager and cluster API into it.
 	return serial.New("control plane",
 		vcluster.New(apps.vCluster, p.clusterManager.Name).InNamespace(p.clusterManager.Namespace),
-		remoteClusterManager.ProvisionOn(clusterAPIProvisioner),
+		remoteClusterManager.ProvisionOn(clusterAPIProvisioner, remotecluster.BackgroundDeletion),
 	)
 }
 
@@ -164,7 +164,10 @@ func (p *Provisioner) Deprovision(ctx context.Context) error {
 	// When deleting a manager, you need to also delete any managed clusters
 	// first to free up compute resources from the provider, so block until
 	// this is done.
-	cli := coreclient.StaticClientFromContext(ctx)
+	cli, err := coreclient.ProvisionerClientFromContext(ctx)
+	if err != nil {
+		return err
+	}
 
 	var clusters unikornv1.KubernetesClusterList
 
diff --git a/pkg/server/handler/cluster/client.go b/pkg/server/handler/cluster/client.go
index a43c4018..3918c57a 100644
--- a/pkg/server/handler/cluster/client.go
+++ b/pkg/server/handler/cluster/client.go
@@ -148,7 +148,11 @@ func (c *Client) GetKubeconfig(ctx context.Context, organizationID, projectID, c
 
 	// TODO: propagate the client like we do in the controllers, then code sharing
 	// becomes a lot easier!
-	ctx = coreclient.NewContextWithDynamicClient(ctx, c.client)
+	clusterContext := &coreclient.ClusterContext{
+		Client: c.client,
+	}
+
+	ctx = coreclient.NewContextWithCluster(ctx, clusterContext)
 
 	vc := vcluster.NewControllerRuntimeClient()
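
Reviewer note: below is a minimal sketch of how the propagation introduced by this patch is meant to be consumed, pieced together only from the calls the diff itself uses (ClusterContext, NewContextWithCluster, ClusterFromContext, ProvisionerClientFromContext). The coreclient import path and the two wrapper function names are assumptions for illustration, not part of the patch.

// Sketch only: the coreclient import path is assumed from the "coreclient"
// alias used throughout the patch; adjust to the real package.
package example

import (
	"context"

	coreclient "github.com/unikorn-cloud/core/pkg/client"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// attachCluster mirrors the server handler change: wrap the controller-runtime
// client for the target cluster in a ClusterContext and stash it in the
// request context. (attachCluster is a hypothetical helper name.)
func attachCluster(ctx context.Context, cli client.Client) context.Context {
	clusterContext := &coreclient.ClusterContext{
		Client: cli,
	}

	return coreclient.NewContextWithCluster(ctx, clusterContext)
}

// getFromCluster mirrors the provisioner-side change: recover the remote
// cluster's client from the context rather than via a dynamic-client lookup,
// and surface an error if nothing was attached. (getFromCluster is a
// hypothetical helper name.)
func getFromCluster(ctx context.Context, key client.ObjectKey, obj client.Object) error {
	clusterContext, err := coreclient.ClusterFromContext(ctx)
	if err != nil {
		return err
	}

	return clusterContext.Client.Get(ctx, key, obj)
}

The background-deletion change follows the same idea: instead of mutating the provisioner with clusterAPIProvisioner.BackgroundDeletion(), the flag now travels as the remotecluster.BackgroundDeletion option passed to ProvisionOn, as shown in the clustermanager hunk above.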