diff --git a/Makefile b/Makefile
index 396c640ce..c12e68617 100644
--- a/Makefile
+++ b/Makefile
@@ -293,21 +293,25 @@ dev-aws-creds: yq
 dev-azure-creds: envsubst
 	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/azure-credentials.yaml | $(KUBECTL) apply -f -
 
+.PHONY: dev-vsphere-creds
+dev-vsphere-creds: envsubst
+	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/vsphere-credentials.yaml | $(KUBECTL) apply -f -
+
 .PHONY: dev-apply
 dev-apply: kind-deploy registry-deploy dev-push dev-deploy dev-templates
 
 .PHONY: dev-destroy
 dev-destroy: kind-undeploy registry-undeploy ## Destroy the development environment by deleting the kind cluster and local registry.
 
-.PHONY: dev-provider-apply
-dev-provider-apply: envsubst
+.PHONY: dev-mcluster-apply
+dev-mcluster-apply: envsubst
 	@if [ $(DEV_PROVIDER) = "aws" ]; then \
 		$(MAKE) dev-aws-creds; \
 	fi
 	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-managedcluster.yaml | $(KUBECTL) apply -f -
 
-.PHONY: dev-provider-delete
-dev-provider-delete: envsubst
+.PHONY: dev-mcluster-delete
+dev-mcluster-delete: envsubst
 	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-managedcluster.yaml | $(KUBECTL) delete -f -
 
 .PHONY: dev-creds-apply
@@ -320,12 +324,6 @@ dev-aws-nuke: ## Warning: Destructive! Nuke all AWS resources deployed by 'DEV_P
 	@rm config/dev/cloud_nuke.yaml
 	@CLUSTER_NAME=$(CLUSTER_NAME) YQ=$(YQ) AWSCLI=$(AWSCLI) bash -c ./scripts/aws-nuke-ccm.sh
 
-.PHONY: test-apply
-test-apply: kind-deploy registry-deploy dev-push dev-deploy dev-templates
-
-.PHONY: test-destroy
-test-destroy: kind-undeploy registry-undeploy
-
 .PHONY: cli-install
 cli-install: clusterawsadm clusterctl cloud-nuke yq awscli ## Install the necessary CLI tools for deployment, development and testing.
diff --git a/config/dev/vsphere-credentials.yaml b/config/dev/vsphere-credentials.yaml
new file mode 100644
index 000000000..c9efc9f3a
--- /dev/null
+++ b/config/dev/vsphere-credentials.yaml
@@ -0,0 +1,20 @@
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: VSphereClusterIdentity
+metadata:
+  name: vsphere-cluster-identity
+  namespace: ${NAMESPACE}
+spec:
+  secretName: vsphere-cluster-identity-secret
+  allowedNamespaces:
+    selector:
+      matchLabels: {}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: vsphere-cluster-identity-secret
+  namespace: ${NAMESPACE}
+stringData:
+  username: ${VSPHERE_USER}
+  password: ${VSPHERE_PASSWORD}
diff --git a/config/dev/vsphere-managedcluster.yaml b/config/dev/vsphere-managedcluster.yaml
new file mode 100644
index 000000000..837614be2
--- /dev/null
+++ b/config/dev/vsphere-managedcluster.yaml
@@ -0,0 +1,42 @@
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: ManagedCluster
+metadata:
+  name: vsphere-dev
+  namespace: ${NAMESPACE}
+spec:
+  template: vsphere-standalone-cp
+  config:
+    controlPlaneNumber: 1
+    workersNumber: 1
+    clusterIdentity:
+      name: vsphere-cluster-identity
+    vsphere:
+      server: ${VSPHERE_SERVER}
+      thumbprint: ${VSPHERE_THUMBPRINT}
+      datacenter: ${VSPHERE_DATACENTER}
+      datastore: ${VSPHERE_DATASTORE}
+      resourcePool: ${VSPHERE_RESOURCEPOOL}
+      folder: ${VSPHERE_FOLDER}
+      username: ${VSPHERE_USER}
+      password: ${VSPHERE_PASSWORD}
+      controlPlaneEndpointIP: ${VSPHERE_CONTROL_PLANE_ENDPOINT}
+
+    controlPlane:
+      ssh:
+        user: ubuntu
+        publicKey: ${VSPHERE_SSH_KEY}
+      rootVolumeSize: 50
+      cpus: 4
+      memory: 4096
+      vmTemplate: ${VSPHERE_VM_TEMPLATE}
+      network: ${VSPHERE_NETWORK}
+
+    worker:
+      ssh:
+        user: ubuntu
+        publicKey: ${VSPHERE_SSH_KEY}
+      rootVolumeSize: 50
+      cpus: 4
+      memory: 4096
+      vmTemplate: ${VSPHERE_VM_TEMPLATE}
+      network: ${VSPHERE_NETWORK}
diff --git a/docs/dev.md b/docs/dev.md
index 848959399..c07b787a0 100644
--- a/docs/dev.md
+++ b/docs/dev.md
@@ -38,6 +38,30 @@ be set before running deployment:
 More detailed description of these parameters can be found
 [here](azure/cluster-parameters.md).
 
+### vSphere Provider Setup
+
+Follow the instructions on how to configure the [vSphere Provider](vsphere/main.md).
+
+To deploy a dev cluster on vSphere, the following environment variables must be set:
+
+- `VSPHERE_USER`
+- `VSPHERE_PASSWORD`
+- `VSPHERE_SERVER`
+- `VSPHERE_THUMBPRINT`
+- `VSPHERE_DATACENTER`
+- `VSPHERE_DATASTORE`
+- `VSPHERE_RESOURCEPOOL`
+- `VSPHERE_FOLDER`
+- `VSPHERE_CONTROL_PLANE_ENDPOINT`
+- `VSPHERE_VM_TEMPLATE`
+- `VSPHERE_NETWORK`
+- `VSPHERE_SSH_KEY`
+
+The variable names mirror the corresponding `ManagedCluster` parameters. For a
+full explanation of each parameter, see
+[vSphere cluster parameters](vsphere/cluster-parameters.md) and
+[vSphere machine parameters](vsphere/machine-parameters.md).
+
 ## Deploy HMC
 
 Default provider which will be used to deploy cluster is AWS, if you want to use
@@ -57,7 +81,7 @@ running make (e.g. `export DEV_PROVIDER=azure`).
 
 4. Apply credentials for your provider by executing `make dev-creds-apply`.
 
-5. Run `make dev-provider-apply` to deploy managed cluster on provider of your
+5. Run `make dev-mcluster-apply` to deploy managed cluster on provider of your
    choice with default configuration.
 
 6. Wait for infrastructure to be provisioned and the cluster to be deployed.
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 0d3b748d3..70d463575 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -33,6 +33,7 @@ import (
 	"github.com/Mirantis/hmc/test/kubeclient"
 	"github.com/Mirantis/hmc/test/managedcluster"
 	"github.com/Mirantis/hmc/test/utils"
+	vsphereutils "github.com/Mirantis/hmc/test/utils/vsphere"
 )
 
 const (
@@ -43,14 +44,14 @@ const (
 var _ = Describe("controller", Ordered, func() {
 	BeforeAll(func() {
 		By("building and deploying the controller-manager")
-		cmd := exec.Command("make", "test-apply")
+		cmd := exec.Command("make", "dev-apply")
 		_, err := utils.Run(cmd)
 		Expect(err).NotTo(HaveOccurred())
 	})
 
 	AfterAll(func() {
 		By("removing the controller-manager")
-		cmd := exec.Command("make", "test-destroy")
+		cmd := exec.Command("make", "dev-destroy")
 		_, err := utils.Run(cmd)
 		Expect(err).NotTo(HaveOccurred())
 	})
@@ -71,6 +72,7 @@ var _ = Describe("controller", Ordered, func() {
 			managedcluster.ProviderCAPI,
 			managedcluster.ProviderAWS,
 			managedcluster.ProviderAzure,
+			managedcluster.ProviderVSphere,
 		} {
 			// Ensure only one controller pod is running.
 			if err := verifyControllerUp(kc, managedcluster.GetProviderLabel(provider), string(provider)); err != nil {
@@ -143,7 +145,7 @@ var _ = Describe("controller", Ordered, func() {
 			}
 
 			By("creating a Deployment")
-			d := managedcluster.GetUnstructured(managedcluster.ProviderAWS, template)
+			d := managedcluster.GetUnstructured(template)
 			clusterName = d.GetName()
 
 			deleteFunc, err = kc.CreateManagedCluster(context.Background(), d)
@@ -151,7 +153,7 @@ var _ = Describe("controller", Ordered, func() {
 
 			By("waiting for infrastructure providers to deploy successfully")
 			Eventually(func() error {
-				return managedcluster.VerifyProviderDeployed(context.Background(), kc, clusterName)
+				return managedcluster.VerifyProviderDeployed(context.Background(), kc, clusterName, managedcluster.ProviderAWS)
 			}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
 
 			By("verify the deployment deletes successfully")
@@ -163,6 +165,74 @@ var _ = Describe("controller", Ordered, func() {
 			})
 		}
 	})
+
+	Context("vSphere templates", func() {
+		var (
+			kc          *kubeclient.KubeClient
+			deleteFunc  func() error
+			clusterName string
+			err         error
+		)
+
+		BeforeAll(func() {
+			// Set here to skip CI runs for now
+			_, testVsphere := os.LookupEnv("TEST_VSPHERE")
+			if !testVsphere {
+				Skip("Skipping vSphere tests")
+			}
+
+			By("ensuring that env vars are set correctly")
+			Expect(vsphereutils.CheckEnv()).Should(Succeed())
+			By("creating kube client")
+			kc, err = kubeclient.NewFromLocal(namespace)
+			Expect(err).NotTo(HaveOccurred())
+			By("providing cluster identity")
+			credSecretName := "vsphere-cluster-identity-secret-e2e"
+			clusterIdentityName := "vsphere-cluster-identity-e2e"
+			Expect(kc.CreateVSphereSecret(credSecretName)).Should(Succeed())
+			Expect(kc.CreateVSphereClusterIdentity(credSecretName, clusterIdentityName)).Should(Succeed())
+			By("setting VSPHERE_CLUSTER_IDENTITY env variable")
+			Expect(os.Setenv("VSPHERE_CLUSTER_IDENTITY", clusterIdentityName)).Should(Succeed())
+		})
+
+		AfterEach(func() {
+			// If we failed collect logs from each of the affiliated controllers
+			// as well as the output of clusterctl to store as artifacts.
+			if CurrentSpecReport().Failed() {
+				By("collecting failure logs from controllers")
+				collectLogArtifacts(kc, clusterName, managedcluster.ProviderVSphere, managedcluster.ProviderCAPI)
+			}
+
+			if deleteFunc != nil {
+				By("deleting the deployment")
+				err = deleteFunc()
+				Expect(err).NotTo(HaveOccurred())
+			}
+		})
+
+		It("should deploy standalone managed cluster", func() {
+			By("creating a managed cluster")
+			d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP)
+			clusterName = d.GetName()
+
+			deleteFunc, err = kc.CreateManagedCluster(context.Background(), d)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("waiting for infrastructure providers to deploy successfully")
+			Eventually(func() error {
+				return managedcluster.VerifyProviderDeployed(context.Background(), kc, clusterName, managedcluster.ProviderVSphere)
+			}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+			By("verify the deployment deletes successfully")
+			err = deleteFunc()
+			Expect(err).NotTo(HaveOccurred())
+			Eventually(func() error {
+				return managedcluster.VerifyProviderDeleted(context.Background(), kc, clusterName)
+			}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+		})
+	})
+
 })
 
 func verifyControllerUp(kc *kubeclient.KubeClient, labelSelector string, name string) error {
diff --git a/test/kubeclient/kubeclient.go b/test/kubeclient/kubeclient.go
index c1a4453c9..5d2fd3d17 100644
--- a/test/kubeclient/kubeclient.go
+++ b/test/kubeclient/kubeclient.go
@@ -154,6 +154,75 @@ func (kc *KubeClient) CreateAWSCredentialsKubeSecret(ctx context.Context) error
 	return nil
 }
 
+func (kc *KubeClient) CreateVSphereSecret(secretName string) error {
+	ctx := context.Background()
+	// Only create the secret if it does not already exist.
+	_, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get(ctx, secretName, metav1.GetOptions{})
+	if !apierrors.IsNotFound(err) {
+		return nil
+	}
+
+	username := os.Getenv("VSPHERE_USER")
+	password := os.Getenv("VSPHERE_PASSWORD")
+
+	_, err = kc.Client.CoreV1().Secrets(kc.Namespace).Create(ctx, &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: secretName,
+		},
+		StringData: map[string]string{
+			"username": username,
+			"password": password,
+		},
+		Type: corev1.SecretTypeOpaque,
+	}, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to create vSphere credentials secret: %w", err)
+	}
+
+	return nil
+}
+
+func (kc *KubeClient) CreateVSphereClusterIdentity(secretName string, identityName string) error {
+	ctx := context.Background()
+	client, err := dynamic.NewForConfig(kc.Config)
+	if err != nil {
+		return fmt.Errorf("failed to create dynamic client: %w", err)
+	}
+
+	gvr := schema.GroupVersionResource{
+		Group:    "infrastructure.cluster.x-k8s.io",
+		Version:  "v1beta1",
+		Resource: "vsphereclusteridentities",
+	}
+
+	clusterIdentity := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+			"kind":       "VSphereClusterIdentity",
+			"metadata": map[string]interface{}{
+				"name": identityName,
+			},
+			"spec": map[string]interface{}{
+				"secretName": secretName,
+				"allowedNamespaces": map[string]interface{}{
+					"selector": map[string]interface{}{
+						"matchLabels": map[string]interface{}{},
+					},
+				},
+			},
+		},
+	}
+
+	_, err = client.Resource(gvr).Create(ctx, clusterIdentity, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to create VSphereClusterIdentity %s: %w", identityName, err)
+	}
+
+	return nil
+}
+
 // GetDynamicClient returns a dynamic client for the given GroupVersionResource.
 func (kc *KubeClient) GetDynamicClient(gvr schema.GroupVersionResource) (dynamic.ResourceInterface, error) {
 	client, err := dynamic.NewForConfig(kc.Config)
diff --git a/test/managedcluster/managedcluster.go b/test/managedcluster/managedcluster.go
index 28783ea5d..af28952e3 100644
--- a/test/managedcluster/managedcluster.go
+++ b/test/managedcluster/managedcluster.go
@@ -30,9 +30,10 @@ import (
 type ProviderType string
 
 const (
-	ProviderCAPI  ProviderType = "cluster-api"
-	ProviderAWS   ProviderType = "infrastructure-aws"
-	ProviderAzure ProviderType = "infrastructure-azure"
+	ProviderCAPI    ProviderType = "cluster-api"
+	ProviderAWS     ProviderType = "infrastructure-aws"
+	ProviderAzure   ProviderType = "infrastructure-azure"
+	ProviderVSphere ProviderType = "infrastructure-vsphere"
 
 	providerLabel = "cluster.x-k8s.io/provider"
 )
@@ -40,8 +41,10 @@ const (
 type Template string
 
 const (
-	TemplateAWSStandaloneCP Template = "aws-standalone-cp"
-	TemplateAWSHostedCP     Template = "aws-hosted-cp"
+	TemplateAWSStandaloneCP     Template = "aws-standalone-cp"
+	TemplateAWSHostedCP         Template = "aws-hosted-cp"
+	TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp"
+	TemplateVSphereHostedCP     Template = "vsphere-hosted-cp"
 )
 
 //go:embed resources/aws-standalone-cp.yaml.tpl
@@ -50,44 +53,47 @@ var awsStandaloneCPManagedClusterTemplateBytes []byte
 //go:embed resources/aws-hosted-cp.yaml.tpl
 var awsHostedCPManagedClusterTemplateBytes []byte
 
+//go:embed resources/vsphere-standalone-cp.yaml.tpl
+var vsphereStandaloneCPManagedClusterTemplateBytes []byte
+
+//go:embed resources/vsphere-hosted-cp.yaml.tpl
+var vsphereHostedCPManagedClusterTemplateBytes []byte
+
 func GetProviderLabel(provider ProviderType) string {
 	return fmt.Sprintf("%s=%s", providerLabel, provider)
 }
 
 // GetUnstructured returns an unstructured ManagedCluster object based on the
-// provider and template.
+// given template.
-func GetUnstructured(provider ProviderType, templateName Template) *unstructured.Unstructured {
+func GetUnstructured(templateName Template) *unstructured.Unstructured {
 	GinkgoHelper()
 
 	generatedName := uuid.New().String()[:8] + "-e2e-test"
 	_, _ = fmt.Fprintf(GinkgoWriter, "Generated cluster name: %q\n", generatedName)
 
-	switch provider {
-	case ProviderAWS:
-		Expect(os.Setenv("MANAGED_CLUSTER_NAME", generatedName)).NotTo(HaveOccurred())
-
-		var managedClusterTemplateBytes []byte
-		switch templateName {
-		case TemplateAWSStandaloneCP:
-			managedClusterTemplateBytes = awsStandaloneCPManagedClusterTemplateBytes
-		case TemplateAWSHostedCP:
-			managedClusterTemplateBytes = awsHostedCPManagedClusterTemplateBytes
-		default:
-			Fail(fmt.Sprintf("unsupported AWS template: %s", templateName))
-		}
-
-		managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes)
-		Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables")
+	Expect(os.Setenv("MANAGED_CLUSTER_NAME", generatedName)).NotTo(HaveOccurred())
+
+	var managedClusterTemplateBytes []byte
+	switch templateName {
+	case TemplateAWSStandaloneCP:
+		managedClusterTemplateBytes = awsStandaloneCPManagedClusterTemplateBytes
+	case TemplateAWSHostedCP:
+		managedClusterTemplateBytes = awsHostedCPManagedClusterTemplateBytes
+	case TemplateVSphereStandaloneCP:
+		managedClusterTemplateBytes = vsphereStandaloneCPManagedClusterTemplateBytes
+	case TemplateVSphereHostedCP:
+		managedClusterTemplateBytes = vsphereHostedCPManagedClusterTemplateBytes
+	default:
+		Fail(fmt.Sprintf("unsupported template type: %s", templateName))
+	}
 
-		var managedClusterConfig map[string]interface{}
+	managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes)
+	Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables")
 
-		err = yaml.Unmarshal(managedClusterConfigBytes, &managedClusterConfig)
-		Expect(err).NotTo(HaveOccurred(), "failed to unmarshal deployment config")
+	var managedClusterConfig map[string]interface{}
 
-		return &unstructured.Unstructured{Object: managedClusterConfig}
-	default:
-		Fail(fmt.Sprintf("unsupported provider: %s", provider))
-	}
+	err = yaml.Unmarshal(managedClusterConfigBytes, &managedClusterConfig)
+	Expect(err).NotTo(HaveOccurred(), "failed to unmarshal deployment config")
 
-	return nil
+	return &unstructured.Unstructured{Object: managedClusterConfig}
 }
diff --git a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
new file mode 100644
index 000000000..2c556d9cc
--- /dev/null
+++ b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
@@ -0,0 +1,35 @@
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: ManagedCluster
+metadata:
+  name: ${MANAGED_CLUSTER_NAME}
+spec:
+  template: vsphere-hosted-cp
+  config:
+    controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1}
+    workersNumber: ${WORKERS_NUMBER:=1}
+    clusterIdentity:
+      name: "${VSPHERE_CLUSTER_IDENTITY}"
+    vsphere:
+      server: "${VSPHERE_SERVER}"
+      thumbprint: "${VSPHERE_THUMBPRINT}"
+      datacenter: "${VSPHERE_DATACENTER}"
+      datastore: "${VSPHERE_DATASTORE}"
+      resourcePool: "${VSPHERE_RESOURCEPOOL}"
+      folder: "${VSPHERE_FOLDER}"
+      username: "${VSPHERE_USER}"
+      password: "${VSPHERE_PASSWORD}"
+      controlPlaneEndpointIP: "${VSPHERE_CONTROL_PLANE_ENDPOINT}"
+
+    ssh:
+      user: ubuntu
+      publicKey: "${VSPHERE_SSH_KEY}"
+    rootVolumeSize: 50
+    cpus: 4
+    memory: 4096
+    vmTemplate: "${VSPHERE_VM_TEMPLATE}"
+    network: "${VSPHERE_NETWORK}"
+
+    k0smotron:
+      service:
+        annotations:
+          kube-vip.io/loadbalancerIPs: "${VSPHERE_CONTROL_PLANE_ENDPOINT}"
diff --git a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
new file mode 100644
index 000000000..98d193257
--- /dev/null
+++ b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
@@ -0,0 +1,41 @@
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: ManagedCluster
+metadata:
+  name: ${MANAGED_CLUSTER_NAME}
+spec:
+  template: vsphere-standalone-cp
+  config:
+    controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1}
+    workersNumber: ${WORKERS_NUMBER:=1}
+    clusterIdentity:
+      name: "${VSPHERE_CLUSTER_IDENTITY}"
+    vsphere:
+      server: "${VSPHERE_SERVER}"
+      thumbprint: "${VSPHERE_THUMBPRINT}"
+      datacenter: "${VSPHERE_DATACENTER}"
+      datastore: "${VSPHERE_DATASTORE}"
+      resourcePool: "${VSPHERE_RESOURCEPOOL}"
+      folder: "${VSPHERE_FOLDER}"
+      username: "${VSPHERE_USER}"
+      password: "${VSPHERE_PASSWORD}"
+      controlPlaneEndpointIP: "${VSPHERE_CONTROL_PLANE_ENDPOINT}"
+
+    controlPlane:
+      ssh:
+        user: ubuntu
+        publicKey: "${VSPHERE_SSH_KEY}"
+      rootVolumeSize: 50
+      cpus: 4
+      memory: 4096
+      vmTemplate: "${VSPHERE_VM_TEMPLATE}"
+      network: "${VSPHERE_NETWORK}"
+
+    worker:
+      ssh:
+        user: ubuntu
+        publicKey: "${VSPHERE_SSH_KEY}"
+      rootVolumeSize: 50
+      cpus: 4
+      memory: 4096
+      vmTemplate: "${VSPHERE_VM_TEMPLATE}"
+      network: "${VSPHERE_NETWORK}"
diff --git a/test/managedcluster/validate_deleted.go b/test/managedcluster/validate_deleted.go
index dc5712a9f..8305fe62c 100644
--- a/test/managedcluster/validate_deleted.go
+++ b/test/managedcluster/validate_deleted.go
@@ -20,6 +20,7 @@ import (
 
 	"github.com/Mirantis/hmc/test/kubeclient"
 	"github.com/Mirantis/hmc/test/utils"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
@@ -41,7 +42,7 @@ func VerifyProviderDeleted(ctx context.Context, kc *kubeclient.KubeClient, clust
 func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
 	// Validate that the Cluster resource has been deleted
 	cluster, err := kc.GetCluster(ctx, clusterName)
-	if err != nil {
+	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
 
@@ -76,7 +77,7 @@ func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, clus
 // been deleted.
 func validateMachineDeploymentsDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
 	machineDeployments, err := kc.ListMachineDeployments(ctx, clusterName)
-	if err != nil {
+	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
 
@@ -96,7 +97,7 @@ func validateMachineDeploymentsDeleted(ctx context.Context, kc *kubeclient.KubeC
 // been deleted.
 func validateK0sControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
 	controlPlanes, err := kc.ListK0sControlPlanes(ctx, clusterName)
-	if err != nil {
+	if err != nil && !apierrors.IsNotFound(err) {
 		return err
 	}
 
diff --git a/test/managedcluster/validate_deployed.go b/test/managedcluster/validate_deployed.go
index f6423fb2b..b327d4f6f 100644
--- a/test/managedcluster/validate_deployed.go
+++ b/test/managedcluster/validate_deployed.go
@@ -45,9 +45,16 @@ var resourceValidators = map[string]resourceValidationFunc{
 // VerifyProviderDeployed is a provider-agnostic verification that checks
 // to ensure generic resources managed by the provider have been deleted.
 // It is intended to be used in conjunction with an Eventually block.
-func VerifyProviderDeployed(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
-	return verifyProviderAction(ctx, kc, clusterName, resourceValidators,
-		[]string{"clusters", "machines", "control-planes", "csi-driver", "ccm"})
+func VerifyProviderDeployed(
+	ctx context.Context, kc *kubeclient.KubeClient, clusterName string,
+	providerType ProviderType) error {
+	// vSphere clusters are not checked for a CCM resource.
+	if providerType == ProviderVSphere {
+		return verifyProviderAction(ctx, kc, clusterName, resourceValidators,
+			[]string{"clusters", "machines", "control-planes", "csi-driver"})
+	} else {
+		return verifyProviderAction(ctx, kc, clusterName, resourceValidators,
+			[]string{"clusters", "machines", "control-planes", "csi-driver", "ccm"})
+	}
 }
 
 // verifyProviderAction is a provider-agnostic verification that checks for
diff --git a/test/utils/vsphere/vsphere.go b/test/utils/vsphere/vsphere.go
new file mode 100644
index 000000000..d9ffcef14
--- /dev/null
+++ b/test/utils/vsphere/vsphere.go
@@ -0,0 +1,44 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vsphereutils
+
+import (
+	"fmt"
+	"os"
+)
+
+// CheckEnv returns an error if any environment variable required by the vSphere e2e tests is unset.
+func CheckEnv() error {
+	envVars := []string{
+		"VSPHERE_USER",
+		"VSPHERE_PASSWORD",
+		"VSPHERE_SERVER",
+		"VSPHERE_THUMBPRINT",
+		"VSPHERE_DATACENTER",
+		"VSPHERE_DATASTORE",
+		"VSPHERE_RESOURCEPOOL",
+		"VSPHERE_FOLDER",
+		"VSPHERE_CONTROL_PLANE_ENDPOINT",
+		"VSPHERE_VM_TEMPLATE",
+		"VSPHERE_NETWORK",
+		"VSPHERE_SSH_KEY",
+	}
+	for _, envVar := range envVars {
+		if _, exists := os.LookupEnv(envVar); !exists {
+			return fmt.Errorf("variable %s isn't set", envVar)
+		}
+	}
+	return nil
+}
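
A rough local-usage sketch (illustrative only, not part of the patch): assuming the twelve VSPHERE_* variables listed in docs/dev.md are exported, the development targets added above can be exercised along these lines. The credentials step calls the new dev-vsphere-creds target directly, since dev-mcluster-apply only applies AWS credentials automatically:

    export DEV_PROVIDER=vsphere
    make dev-vsphere-creds      # applies config/dev/vsphere-credentials.yaml (VSphereClusterIdentity + Secret)
    make dev-mcluster-apply     # renders config/dev/vsphere-managedcluster.yaml and applies it
    make dev-mcluster-delete    # deletes the ManagedCluster again

The vSphere Context added to test/e2e/e2e_test.go is skipped unless the TEST_VSPHERE environment variable is set.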