diff --git a/test/e2e/const.go b/test/e2e/const.go
index 4c745119..9a723ffd 100644
--- a/test/e2e/const.go
+++ b/test/e2e/const.go
@@ -92,6 +92,7 @@ const (
 	RancherTurtlesNamespace = "rancher-turtles-system"
 	RancherNamespace        = "cattle-system"
 	NginxIngressNamespace   = "ingress-nginx"
+	NginxIngressDeployment  = "ingress-nginx-controller"
 )
 
 const (
diff --git a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go
index dd5fca4e..4e0fa034 100644
--- a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go
+++ b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go
@@ -84,45 +84,50 @@ var _ = BeforeSuite(func() {
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
 
+	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
+	ingressType := testenv.NgrokIngress
 	dockerUsername := ""
 	dockerPassword := ""
+	var customClusterProvider testenv.CustomClusterProvider
+
 	if flagVals.UseEKS {
 		Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
 		dockerUsername = os.Getenv("GITHUB_USERNAME")
 		Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
 		dockerPassword = os.Getenv("GITHUB_TOKEN")
 		Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
+		customClusterProvider = testenv.EKSBootstrapCluster
+		Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
+		ingressType = testenv.EKSNginxIngress
+	}
+
+	if flagVals.IsolatedMode {
+		ingressType = testenv.CustomIngress
 	}
 
 	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
 	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
 
-	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
-
 	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster:   flagVals.UseExistingCluster,
-		E2EConfig:            e2eConfig,
-		ClusterctlConfigPath: clusterctlConfigPath,
-		Scheme:               e2e.InitScheme(),
-		ArtifactFolder:       flagVals.ArtifactFolder,
-		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		IsolatedMode:         flagVals.IsolatedMode,
-		HelmBinaryPath:       flagVals.HelmBinaryPath,
-		UseEKS:               flagVals.UseEKS,
+		UseExistingCluster:    flagVals.UseExistingCluster,
+		E2EConfig:             e2eConfig,
+		ClusterctlConfigPath:  clusterctlConfigPath,
+		Scheme:                e2e.InitScheme(),
+		ArtifactFolder:        flagVals.ArtifactFolder,
+		KubernetesVersion:     e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
+		IsolatedMode:          flagVals.IsolatedMode,
+		HelmBinaryPath:        flagVals.HelmBinaryPath,
+		CustomClusterProvider: customClusterProvider,
 	})
 
-	if flagVals.IsolatedMode {
-		hostName = setupClusterResult.IsolatedHostName
-	}
-
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 		HelmBinaryPath:        flagVals.HelmBinaryPath,
 		HelmExtraValuesPath:   filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
-		IsolatedMode:          flagVals.IsolatedMode,
-		UseEKS:                flagVals.UseEKS,
-		NginxIngress:          e2e.NginxIngress,
-		NginxIngressNamespace: e2e.NginxIngressNamespace,
+		IngressType:             ingressType,
+		CustomIngress:           e2e.NginxIngress,
+		CustomIngressNamespace:  e2e.NginxIngressNamespace,
+		CustomIngressDeployment: e2e.NginxIngressDeployment,
 		IngressWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
 		NgrokApiKey:           e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
 		NgrokAuthToken:        e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
@@ -132,6 +137,10 @@ var _ = BeforeSuite(func() {
 		DefaultIngressClassPatch: e2e.IngressClassPatch,
 	})
 
+	if flagVals.IsolatedMode {
+		hostName = setupClusterResult.IsolatedHostName
+	}
+
 	if flagVals.UseEKS {
 		By("Getting ingress hostname")
 		svcRes := &testenv.WaitForServiceIngressHostnameResult{}
diff --git a/test/e2e/suites/embedded-capi-disabled/suite_test.go b/test/e2e/suites/embedded-capi-disabled/suite_test.go
index 07e5e018..b29c9c77 100644
--- a/test/e2e/suites/embedded-capi-disabled/suite_test.go
+++ b/test/e2e/suites/embedded-capi-disabled/suite_test.go
@@ -85,45 +85,50 @@ var _ = BeforeSuite(func() {
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
 
+	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
+	ingressType := testenv.NgrokIngress
 	dockerUsername := ""
 	dockerPassword := ""
+	var customClusterProvider testenv.CustomClusterProvider
+
 	if flagVals.UseEKS {
 		Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
 		dockerUsername = os.Getenv("GITHUB_USERNAME")
 		Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
 		dockerPassword = os.Getenv("GITHUB_TOKEN")
 		Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
+		customClusterProvider = testenv.EKSBootstrapCluster
+		Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
+		ingressType = testenv.EKSNginxIngress
+	}
+
+	if flagVals.IsolatedMode {
+		ingressType = testenv.CustomIngress
 	}
 
 	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
 	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
 
-	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
-
 	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster:   flagVals.UseExistingCluster,
-		E2EConfig:            e2eConfig,
-		ClusterctlConfigPath: clusterctlConfigPath,
-		Scheme:               e2e.InitScheme(),
-		ArtifactFolder:       flagVals.ArtifactFolder,
-		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		IsolatedMode:         flagVals.IsolatedMode,
-		HelmBinaryPath:       flagVals.HelmBinaryPath,
-		UseEKS:               flagVals.UseEKS,
+		UseExistingCluster:    flagVals.UseExistingCluster,
+		E2EConfig:             e2eConfig,
+		ClusterctlConfigPath:  clusterctlConfigPath,
+		Scheme:                e2e.InitScheme(),
+		ArtifactFolder:        flagVals.ArtifactFolder,
+		KubernetesVersion:     e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
+		IsolatedMode:          flagVals.IsolatedMode,
+		HelmBinaryPath:        flagVals.HelmBinaryPath,
+		CustomClusterProvider: customClusterProvider,
 	})
 
-	if flagVals.IsolatedMode {
-		hostName = setupClusterResult.IsolatedHostName
-	}
-
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 		HelmBinaryPath:        flagVals.HelmBinaryPath,
 		HelmExtraValuesPath:   filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
-		IsolatedMode:          flagVals.IsolatedMode,
-		UseEKS:                flagVals.UseEKS,
-		NginxIngress:          e2e.NginxIngress,
-		NginxIngressNamespace: e2e.NginxIngressNamespace,
+		IngressType:             ingressType,
+		CustomIngress:           e2e.NginxIngress,
+		CustomIngressNamespace:  e2e.NginxIngressNamespace,
+		CustomIngressDeployment: e2e.NginxIngressDeployment,
 		IngressWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
"wait-rancher"), NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), @@ -133,6 +138,10 @@ var _ = BeforeSuite(func() { DefaultIngressClassPatch: e2e.IngressClassPatch, }) + if flagVals.IsolatedMode { + hostName = setupClusterResult.IsolatedHostName + } + if flagVals.UseEKS { By("Getting ingress hostname") svcRes := &testenv.WaitForServiceIngressHostnameResult{} diff --git a/test/e2e/suites/import-gitops-v3/suite_test.go b/test/e2e/suites/import-gitops-v3/suite_test.go index ed96db5c..1db18d20 100644 --- a/test/e2e/suites/import-gitops-v3/suite_test.go +++ b/test/e2e/suites/import-gitops-v3/suite_test.go @@ -84,45 +84,50 @@ var _ = BeforeSuite(func() { By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) + ingressType := testenv.NgrokIngress dockerUsername := "" dockerPassword := "" + var customClusterProvider testenv.CustomClusterProvider + if flagVals.UseEKS { Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated") dockerUsername = os.Getenv("GITHUB_USERNAME") Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required") dockerPassword = os.Getenv("GITHUB_TOKEN") Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required") + customClusterProvider = testenv.EKSBootsrapCluster + Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required") + ingressType = testenv.EKSNginxIngress + } + + if flagVals.IsolatedMode { + ingressType = testenv.CustomIngress } By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder)) clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository")) - hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar) - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: flagVals.UseExistingCluster, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: flagVals.ArtifactFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - IsolatedMode: flagVals.IsolatedMode, - HelmBinaryPath: flagVals.HelmBinaryPath, - UseEKS: flagVals.UseEKS, + UseExistingCluster: flagVals.UseExistingCluster, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: flagVals.ArtifactFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + IsolatedMode: flagVals.IsolatedMode, + HelmBinaryPath: flagVals.HelmBinaryPath, + CustomClusterProvider: customClusterProvider, }) - if flagVals.IsolatedMode { - hostName = setupClusterResult.IsolatedHostName - } - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, HelmBinaryPath: flagVals.HelmBinaryPath, HelmExtraValuesPath: filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"), - IsolatedMode: flagVals.IsolatedMode, - UseEKS: flagVals.UseEKS, - NginxIngress: e2e.NginxIngress, - NginxIngressNamespace: e2e.NginxIngressNamespace, + IngressType: ingressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, IngressWaitInterval: 
 		NgrokApiKey:           e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
 		NgrokAuthToken:        e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
@@ -132,6 +137,10 @@ var _ = BeforeSuite(func() {
 		DefaultIngressClassPatch: e2e.IngressClassPatch,
 	})
 
+	if flagVals.IsolatedMode {
+		hostName = setupClusterResult.IsolatedHostName
+	}
+
 	if flagVals.UseEKS {
 		By("Getting ingress hostname")
 		svcRes := &testenv.WaitForServiceIngressHostnameResult{}
diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go
index 8b945041..db86bf14 100644
--- a/test/e2e/suites/import-gitops/suite_test.go
+++ b/test/e2e/suites/import-gitops/suite_test.go
@@ -87,45 +87,50 @@ var _ = BeforeSuite(func() {
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
 
+	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
+	ingressType := testenv.NgrokIngress
 	dockerUsername := ""
 	dockerPassword := ""
+	var customClusterProvider testenv.CustomClusterProvider
+
 	if flagVals.UseEKS {
 		Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
 		dockerUsername = os.Getenv("GITHUB_USERNAME")
 		Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
 		dockerPassword = os.Getenv("GITHUB_TOKEN")
 		Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
+		customClusterProvider = testenv.EKSBootstrapCluster
+		Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
+		ingressType = testenv.EKSNginxIngress
+	}
+
+	if flagVals.IsolatedMode {
+		ingressType = testenv.CustomIngress
 	}
 
 	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
 	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
 
-	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
-
 	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster:   flagVals.UseExistingCluster,
-		E2EConfig:            e2eConfig,
-		ClusterctlConfigPath: clusterctlConfigPath,
-		Scheme:               e2e.InitScheme(),
-		ArtifactFolder:       flagVals.ArtifactFolder,
-		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		IsolatedMode:         flagVals.IsolatedMode,
-		HelmBinaryPath:       flagVals.HelmBinaryPath,
-		UseEKS:               flagVals.UseEKS,
+		UseExistingCluster:    flagVals.UseExistingCluster,
+		E2EConfig:             e2eConfig,
+		ClusterctlConfigPath:  clusterctlConfigPath,
+		Scheme:                e2e.InitScheme(),
+		ArtifactFolder:        flagVals.ArtifactFolder,
+		KubernetesVersion:     e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
+		IsolatedMode:          flagVals.IsolatedMode,
+		HelmBinaryPath:        flagVals.HelmBinaryPath,
+		CustomClusterProvider: customClusterProvider,
 	})
 
-	if flagVals.IsolatedMode {
-		hostName = setupClusterResult.IsolatedHostName
-	}
-
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 		HelmBinaryPath:        flagVals.HelmBinaryPath,
 		HelmExtraValuesPath:   filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
-		IsolatedMode:          flagVals.IsolatedMode,
-		UseEKS:                flagVals.UseEKS,
-		NginxIngress:          e2e.NginxIngress,
-		NginxIngressNamespace: e2e.NginxIngressNamespace,
+		IngressType:             ingressType,
+		CustomIngress:           e2e.NginxIngress,
+		CustomIngressNamespace:  e2e.NginxIngressNamespace,
+		CustomIngressDeployment: e2e.NginxIngressDeployment,
 		IngressWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
 		NgrokApiKey:           e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
 		NgrokAuthToken:        e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
@@ -135,6 +140,10 @@ var _ = BeforeSuite(func() {
 		DefaultIngressClassPatch: e2e.IngressClassPatch,
 	})
 
+	if flagVals.IsolatedMode {
+		hostName = setupClusterResult.IsolatedHostName
+	}
+
 	if flagVals.UseEKS {
 		By("Getting ingress hostname")
 		svcRes := &testenv.WaitForServiceIngressHostnameResult{}
diff --git a/test/e2e/suites/migrate-gitops/suite_test.go b/test/e2e/suites/migrate-gitops/suite_test.go
index 48854bb1..c514e143 100644
--- a/test/e2e/suites/migrate-gitops/suite_test.go
+++ b/test/e2e/suites/migrate-gitops/suite_test.go
@@ -91,31 +91,39 @@ var _ = BeforeSuite(func() {
 	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
 
 	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
+	ingressType := testenv.NgrokIngress
+	var customClusterProvider testenv.CustomClusterProvider
 
-	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster:   flagVals.UseExistingCluster,
-		E2EConfig:            e2eConfig,
-		ClusterctlConfigPath: clusterctlConfigPath,
-		Scheme:               e2e.InitScheme(),
-		ArtifactFolder:       flagVals.ArtifactFolder,
-		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		IsolatedMode:         flagVals.IsolatedMode,
-		HelmBinaryPath:       flagVals.HelmBinaryPath,
-		UseEKS:               flagVals.UseEKS,
-	})
+	if flagVals.UseEKS {
+		customClusterProvider = testenv.EKSBootstrapCluster
+		Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
+		ingressType = testenv.EKSNginxIngress
+	}
 
 	if flagVals.IsolatedMode {
-		hostName = setupClusterResult.IsolatedHostName
+		ingressType = testenv.CustomIngress
 	}
 
+	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
+		UseExistingCluster:    flagVals.UseExistingCluster,
+		E2EConfig:             e2eConfig,
+		ClusterctlConfigPath:  clusterctlConfigPath,
+		Scheme:                e2e.InitScheme(),
+		ArtifactFolder:        flagVals.ArtifactFolder,
+		KubernetesVersion:     e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
+		IsolatedMode:          flagVals.IsolatedMode,
+		HelmBinaryPath:        flagVals.HelmBinaryPath,
+		CustomClusterProvider: customClusterProvider,
+	})
+
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 		HelmBinaryPath:        flagVals.HelmBinaryPath,
 		HelmExtraValuesPath:   filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
-		IsolatedMode:          flagVals.IsolatedMode,
-		UseEKS:                flagVals.UseEKS,
-		NginxIngress:          e2e.NginxIngress,
-		NginxIngressNamespace: e2e.NginxIngressNamespace,
+		IngressType:             ingressType,
+		CustomIngress:           e2e.NginxIngress,
+		CustomIngressNamespace:  e2e.NginxIngressNamespace,
+		CustomIngressDeployment: e2e.NginxIngressDeployment,
 		IngressWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
 		NgrokApiKey:           e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
 		NgrokAuthToken:        e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
@@ -125,6 +133,10 @@ var _ = BeforeSuite(func() {
 		DefaultIngressClassPatch: e2e.IngressClassPatch,
 	})
 
+	if flagVals.IsolatedMode {
+		hostName = setupClusterResult.IsolatedHostName
+	}
+
 	rancherInput := testenv.DeployRancherInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 		HelmBinaryPath:        flagVals.HelmBinaryPath,
diff --git a/test/e2e/suites/update-labels/suite_test.go b/test/e2e/suites/update-labels/suite_test.go
index fb7dbccd..5afa860b 100644
--- a/test/e2e/suites/update-labels/suite_test.go
+++ b/test/e2e/suites/update-labels/suite_test.go
@@ -83,45 +83,50 @@ var _ = BeforeSuite(func() {
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
 
+	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
+	ingressType := testenv.NgrokIngress
 	dockerUsername := ""
 	dockerPassword := ""
+	var customClusterProvider testenv.CustomClusterProvider
+
 	if flagVals.UseEKS {
 		Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
 		dockerUsername = os.Getenv("GITHUB_USERNAME")
 		Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
 		dockerPassword = os.Getenv("GITHUB_TOKEN")
 		Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
+		customClusterProvider = testenv.EKSBootstrapCluster
+		Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
+		ingressType = testenv.EKSNginxIngress
+	}
+
+	if flagVals.IsolatedMode {
+		ingressType = testenv.CustomIngress
 	}
 
 	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
 	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
 
-	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
-
 	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster:   flagVals.UseExistingCluster,
-		E2EConfig:            e2eConfig,
-		ClusterctlConfigPath: clusterctlConfigPath,
-		Scheme:               e2e.InitScheme(),
-		ArtifactFolder:       flagVals.ArtifactFolder,
-		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		IsolatedMode:         flagVals.IsolatedMode,
-		HelmBinaryPath:       flagVals.HelmBinaryPath,
-		UseEKS:               flagVals.UseEKS,
+		UseExistingCluster:    flagVals.UseExistingCluster,
+		E2EConfig:             e2eConfig,
+		ClusterctlConfigPath:  clusterctlConfigPath,
+		Scheme:                e2e.InitScheme(),
+		ArtifactFolder:        flagVals.ArtifactFolder,
+		KubernetesVersion:     e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
+		IsolatedMode:          flagVals.IsolatedMode,
+		HelmBinaryPath:        flagVals.HelmBinaryPath,
+		CustomClusterProvider: customClusterProvider,
 	})
 
-	if flagVals.IsolatedMode {
-		hostName = setupClusterResult.IsolatedHostName
-	}
-
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 		HelmBinaryPath:        flagVals.HelmBinaryPath,
 		HelmExtraValuesPath:   filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
-		IsolatedMode:          flagVals.IsolatedMode,
-		UseEKS:                flagVals.UseEKS,
-		NginxIngress:          e2e.NginxIngress,
-		NginxIngressNamespace: e2e.NginxIngressNamespace,
+		IngressType:             ingressType,
+		CustomIngress:           e2e.NginxIngress,
+		CustomIngressNamespace:  e2e.NginxIngressNamespace,
+		CustomIngressDeployment: e2e.NginxIngressDeployment,
 		IngressWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
 		NgrokApiKey:           e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
 		NgrokAuthToken:        e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
@@ -131,6 +136,10 @@ var _ = BeforeSuite(func() {
 		DefaultIngressClassPatch: e2e.IngressClassPatch,
 	})
 
+	if flagVals.IsolatedMode {
+		hostName = setupClusterResult.IsolatedHostName
+	}
+
 	if flagVals.UseEKS {
 		By("Getting ingress hostname")
 		svcRes := &testenv.WaitForServiceIngressHostnameResult{}
diff --git a/test/e2e/suites/v2prov/suite_test.go b/test/e2e/suites/v2prov/suite_test.go
index f0ff10b9..71f05cd8 100644
--- a/test/e2e/suites/v2prov/suite_test.go
+++ b/test/e2e/suites/v2prov/suite_test.go
@@ -83,45 +83,50 @@ var _ = BeforeSuite(func() {
 	By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath))
 	e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath)
 
+	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
+	ingressType := testenv.NgrokIngress
 	dockerUsername := ""
 	dockerPassword := ""
+	var customClusterProvider testenv.CustomClusterProvider
+
 	if flagVals.UseEKS {
 		Expect(flagVals.IsolatedMode).To(BeFalse(), "You cannot use eks with isolated")
 		dockerUsername = os.Getenv("GITHUB_USERNAME")
 		Expect(dockerUsername).NotTo(BeEmpty(), "Github username is required")
 		dockerPassword = os.Getenv("GITHUB_TOKEN")
 		Expect(dockerPassword).NotTo(BeEmpty(), "Github token is required")
+		customClusterProvider = testenv.EKSBootstrapCluster
+		Expect(customClusterProvider).NotTo(BeNil(), "EKS custom cluster provider is required")
+		ingressType = testenv.EKSNginxIngress
+	}
+
+	if flagVals.IsolatedMode {
+		ingressType = testenv.CustomIngress
 	}
 
 	By(fmt.Sprintf("Creating a clusterctl config into %q", flagVals.ArtifactFolder))
 	clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(flagVals.ArtifactFolder, "repository"))
 
-	hostName = e2eConfig.GetVariable(e2e.RancherHostnameVar)
-
 	setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{
-		UseExistingCluster:   flagVals.UseExistingCluster,
-		E2EConfig:            e2eConfig,
-		ClusterctlConfigPath: clusterctlConfigPath,
-		Scheme:               e2e.InitScheme(),
-		ArtifactFolder:       flagVals.ArtifactFolder,
-		KubernetesVersion:    e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
-		IsolatedMode:         flagVals.IsolatedMode,
-		HelmBinaryPath:       flagVals.HelmBinaryPath,
-		UseEKS:               flagVals.UseEKS,
+		UseExistingCluster:    flagVals.UseExistingCluster,
+		E2EConfig:             e2eConfig,
+		ClusterctlConfigPath:  clusterctlConfigPath,
+		Scheme:                e2e.InitScheme(),
+		ArtifactFolder:        flagVals.ArtifactFolder,
+		KubernetesVersion:     e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar),
+		IsolatedMode:          flagVals.IsolatedMode,
+		HelmBinaryPath:        flagVals.HelmBinaryPath,
+		CustomClusterProvider: customClusterProvider,
 	})
 
-	if flagVals.IsolatedMode {
-		hostName = setupClusterResult.IsolatedHostName
-	}
-
 	testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{
 		BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy,
 		HelmBinaryPath:        flagVals.HelmBinaryPath,
 		HelmExtraValuesPath:   filepath.Join(flagVals.HelmExtraValuesDir, "deploy-rancher-ingress.yaml"),
-		IsolatedMode:          flagVals.IsolatedMode,
-		UseEKS:                flagVals.UseEKS,
-		NginxIngress:          e2e.NginxIngress,
-		NginxIngressNamespace: e2e.NginxIngressNamespace,
+		IngressType:             ingressType,
+		CustomIngress:           e2e.NginxIngress,
+		CustomIngressNamespace:  e2e.NginxIngressNamespace,
+		CustomIngressDeployment: e2e.NginxIngressDeployment,
 		IngressWaitInterval:   e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"),
 		NgrokApiKey:           e2eConfig.GetVariable(e2e.NgrokApiKeyVar),
 		NgrokAuthToken:        e2eConfig.GetVariable(e2e.NgrokAuthTokenVar),
@@ -131,6 +136,10 @@ var _ = BeforeSuite(func() {
 		DefaultIngressClassPatch: e2e.IngressClassPatch,
 	})
 
+	if flagVals.IsolatedMode {
+		hostName = setupClusterResult.IsolatedHostName
+	}
+
 	if flagVals.UseEKS {
 		By("Getting ingress hostname")
 		svcRes := &testenv.WaitForServiceIngressHostnameResult{}
diff --git a/test/testenv/bootstrapclusterproviders.go b/test/testenv/bootstrapclusterproviders.go
new file mode 100644
index 00000000..704900a5
--- /dev/null
+++ b/test/testenv/bootstrapclusterproviders.go
@@ -0,0 +1,48 @@
+/*
+Copyright © 2023 - 2024 SUSE LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testenv
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"sigs.k8s.io/cluster-api/test/framework/bootstrap"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+)
+
+type CustomClusterProvider func(ctx context.Context, config *clusterctl.E2EConfig, clusterName, kubernetesVersion string) bootstrap.ClusterProvider
+
+// EKSBootstrapCluster creates a new EKS bootstrap cluster and returns a ClusterProvider
+func EKSBootstrapCluster(ctx context.Context, config *clusterctl.E2EConfig, clusterName, kubernetesVersion string) bootstrap.ClusterProvider {
+	By("Creating a new EKS bootstrap cluster")
+
+	region := config.Variables["KUBERNETES_MANAGEMENT_AWS_REGION"]
+	Expect(region).ToNot(BeEmpty(), "KUBERNETES_MANAGEMENT_AWS_REGION must be set in the e2e config")
+
+	eksCreateResult := &CreateEKSBootstrapClusterAndValidateImagesInputResult{}
+	CreateEKSBootstrapClusterAndValidateImages(ctx, CreateEKSBootstrapClusterAndValidateImagesInput{
+		Name:       clusterName,
+		Version:    kubernetesVersion,
+		Region:     region,
+		NumWorkers: 1,
+		Images:     config.Images,
+	}, eksCreateResult)
+
+	return eksCreateResult.BootstrapClusterProvider
+}
diff --git a/test/testenv/rancher.go b/test/testenv/rancher.go
index a69252b6..9c7a1930 100644
--- a/test/testenv/rancher.go
+++ b/test/testenv/rancher.go
@@ -263,75 +263,76 @@ func RestartRancher(ctx context.Context, input RestartRancherInput) {
 	}, input.RancherWaitInterval...).ShouldNot(HaveOccurred())
 }
 
+type IngressType string
+
+const (
+	CustomIngress   IngressType = "custom"
+	NgrokIngress    IngressType = "ngrok"
+	EKSNginxIngress IngressType = "eks"
+)
+
 type RancherDeployIngressInput struct {
 	BootstrapClusterProxy    framework.ClusterProxy
 	HelmBinaryPath           string
 	HelmExtraValuesPath      string
-	IsolatedMode             bool
-	NginxIngress             []byte
-	NginxIngressNamespace    string
+	CustomIngress            []byte // TODO: add ability to pass a function that deploys the custom ingress
+	CustomIngressNamespace   string
+	CustomIngressDeployment  string
 	IngressWaitInterval      []interface{}
+	DefaultIngressClassPatch []byte
+	IngressType              IngressType
 	NgrokApiKey              string
 	NgrokAuthToken           string
 	NgrokPath                string
 	NgrokRepoName            string
 	NgrokRepoURL             string
-	DefaultIngressClassPatch []byte
-	UseEKS                   bool
 }
 
 func RancherDeployIngress(ctx context.Context, input RancherDeployIngressInput) {
-	Expect(ctx).NotTo(BeNil(), "ctx is required for RancherDeployIngress")
 	Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "BootstrapClusterProxy is required for RancherDeployIngress")
 
-	if input.IsolatedMode {
-		Expect(input.NginxIngress).ToNot(BeEmpty(), "NginxIngress is required when running in isolated mode")
-		Expect(input.NginxIngressNamespace).ToNot(BeEmpty(), "NginxIngressNamespace is required when running in isolated mode")
isolated mode") - Expect(input.IngressWaitInterval).ToNot(BeNil(), "IngressWaitInterval is required when running in isolated mode") - } else if input.UseEKS { - Expect(input.IngressWaitInterval).ToNot(BeNil(), "IngressWaitInterval is required when running in isolated mode") - } else { - Expect(input.NgrokApiKey).ToNot(BeEmpty(), "NgrokApiKey is required when not running in isolated mode") - Expect(input.NgrokAuthToken).ToNot(BeEmpty(), "NgrokAuthToken is required when not running in isolated mode") - Expect(input.NgrokPath).ToNot(BeEmpty(), "NgrokPath is required when not running in isolated mode") - Expect(input.NgrokRepoName).ToNot(BeEmpty(), "NgrokRepoName is required when not running in isolated mode") - Expect(input.NgrokRepoURL).ToNot(BeEmpty(), "NgrokRepoURL is required when not running in isolated mode") - Expect(input.HelmExtraValuesPath).ToNot(BeNil(), "HelmExtraValuesPath is when not running in isolated mode") - } + Expect(input.IngressType).ToNot(BeEmpty(), "IngressType is required for RancherDeployIngress") komega.SetClient(input.BootstrapClusterProxy.GetClient()) komega.SetContext(ctx) - if input.IsolatedMode { + switch input.IngressType { + case CustomIngress: + Expect(input.CustomIngress).ToNot(BeEmpty(), "CustomIngress is required when using custom ingress") + Expect(input.CustomIngressNamespace).ToNot(BeEmpty(), "CustomIngressNamespace is required when using custom ingress") + Expect(input.CustomIngressDeployment).ToNot(BeEmpty(), "CustomIngressDeployment is required when using custom ingress") + Expect(input.IngressWaitInterval).ToNot(BeNil(), "IngressWaitInterval is required when using custom ingress") deployIsolatedModeIngress(ctx, input) - - return + case NgrokIngress: + Expect(input.NgrokApiKey).ToNot(BeEmpty(), "NgrokApiKey is required when using ngrok ingress") + Expect(input.NgrokAuthToken).ToNot(BeEmpty(), "NgrokAuthToken is required when using ngrok ingress") + Expect(input.NgrokPath).ToNot(BeEmpty(), "NgrokPath is required when using ngrok ingress") + Expect(input.NgrokRepoName).ToNot(BeEmpty(), "NgrokRepoName is required when using ngrok ingress") + Expect(input.NgrokRepoURL).ToNot(BeEmpty(), "NgrokRepoURL is required when using ngrok ingress") + Expect(input.HelmExtraValuesPath).ToNot(BeNil(), "HelmExtraValuesPath is when using ngrok ingress") + deployNgrokIngress(ctx, input) + case EKSNginxIngress: + Expect(input.IngressWaitInterval).ToNot(BeNil(), "IngressWaitInterval is required when using eks ingress") + deployEKSIngress(input) } - if input.UseEKS { - deployEKSIngress(ctx, input) - - return - } - - deployNgrokIngress(ctx, input) } func deployIsolatedModeIngress(ctx context.Context, input RancherDeployIngressInput) { - By("Deploying nginx ingress") - Expect(input.BootstrapClusterProxy.Apply(ctx, []byte(input.NginxIngress))).To(Succeed()) + By("Deploying custom ingress") + Expect(input.BootstrapClusterProxy.Apply(ctx, []byte(input.CustomIngress))).To(Succeed()) - By("Getting nginx ingress deployment") - ngixDeployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "ingress-nginx-controller", Namespace: input.NginxIngressNamespace}} + By("Getting custom ingress deployment") + ingressDeployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: input.CustomIngressDeployment, Namespace: input.CustomIngressNamespace}} Eventually( - komega.Get(ngixDeployment), + komega.Get(ingressDeployment), input.IngressWaitInterval..., - ).Should(Succeed(), "Failed to get nginx ingress controller") + ).Should(Succeed(), "Failed to get custom ingress 
controller") - By("Waiting for ingress-nginx-controller deployment to be available") - Eventually(komega.Object(ngixDeployment), input.IngressWaitInterval...).Should(HaveField("Status.AvailableReplicas", Equal(int32(1)))) + turtlesframework.Byf("Waiting for %s deployment to be available", input.CustomIngressDeployment) + Eventually(komega.Object(ingressDeployment), input.IngressWaitInterval...).Should(HaveField("Status.AvailableReplicas", Equal(int32(1)))) } -func deployEKSIngress(ctx context.Context, input RancherDeployIngressInput) { +func deployEKSIngress(input RancherDeployIngressInput) { By("Add nginx ingress chart repo") certChart := &opframework.HelmChart{ BinaryPath: input.HelmBinaryPath, diff --git a/test/testenv/setupcluster.go b/test/testenv/setupcluster.go index f7a59be2..e1814ecc 100644 --- a/test/testenv/setupcluster.go +++ b/test/testenv/setupcluster.go @@ -37,15 +37,15 @@ import ( type SetupTestClusterInput struct { UseExistingCluster bool - UseEKS bool E2EConfig *clusterctl.E2EConfig ClusterctlConfigPath string Scheme *runtime.Scheme ArtifactFolder string // Hostname string - KubernetesVersion string - IsolatedMode bool - HelmBinaryPath string + KubernetesVersion string + IsolatedMode bool + HelmBinaryPath string + CustomClusterProvider CustomClusterProvider } type SetupTestClusterResult struct { @@ -76,7 +76,7 @@ func SetupTestCluster(ctx context.Context, input SetupTestClusterInput) *SetupTe By("Setting up the bootstrap cluster") result.BootstrapClusterProvider, result.BootstrapClusterProxy = setupCluster( - ctx, input.E2EConfig, input.Scheme, clusterName, input.UseExistingCluster, input.UseEKS, input.KubernetesVersion) + ctx, input.E2EConfig, input.Scheme, clusterName, input.UseExistingCluster, input.KubernetesVersion, input.CustomClusterProvider) if input.UseExistingCluster { return result @@ -87,31 +87,18 @@ func SetupTestCluster(ctx context.Context, input SetupTestClusterInput) *SetupTe result.BootstrapClusterLogFolder = filepath.Join(input.ArtifactFolder, "clusters", result.BootstrapClusterProxy.GetName()) Expect(os.MkdirAll(result.BootstrapClusterLogFolder, 0o750)).To(Succeed(), "Invalid argument. 
Log folder can't be created %s", result.BootstrapClusterLogFolder) - if input.IsolatedMode { - result.IsolatedHostName = configureIsolatedEnvironment(ctx, result.BootstrapClusterProxy) - } + result.IsolatedHostName = getInternalClusterHostname(ctx, result.BootstrapClusterProxy) return result } -func setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *runtime.Scheme, clusterName string, useExistingCluster, useEKS bool, kubernetesVersion string) (bootstrap.ClusterProvider, framework.ClusterProxy) { +func setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *runtime.Scheme, clusterName string, useExistingCluster bool, kubernetesVersion string, customClusterProvider CustomClusterProvider) (bootstrap.ClusterProvider, framework.ClusterProxy) { var clusterProvider bootstrap.ClusterProvider kubeconfigPath := "" - if !useExistingCluster { - if useEKS { - region := config.Variables["KUBERNETES_MANAGEMENT_AWS_REGION"] - Expect(region).ToNot(BeEmpty(), "KUBERNETES_MANAGEMENT_AWS_REGION must be set in the e2e config") - - eksCreateResult := &CreateEKSBootstrapClusterAndValidateImagesInputResult{} - CreateEKSBootstrapClusterAndValidateImages(ctx, CreateEKSBootstrapClusterAndValidateImagesInput{ - Name: clusterName, - Version: kubernetesVersion, - Region: region, - NumWorkers: 1, - Images: config.Images, - }, eksCreateResult) - clusterProvider = eksCreateResult.BootstrapClusterProvider + if !useExistingCluster { + if customClusterProvider != nil { // if customClusterProvider is provided, use it to create the bootstrap cluster instead of kind + clusterProvider = customClusterProvider(ctx, config, clusterName, kubernetesVersion) } else { clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{ Name: clusterName, @@ -120,6 +107,7 @@ func setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *run Images: config.Images, }) } + Expect(clusterProvider).ToNot(BeNil(), "Failed to create a bootstrap cluster") kubeconfigPath = clusterProvider.GetKubeconfigPath() @@ -132,9 +120,10 @@ func setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *run return clusterProvider, proxy } -// configureIsolatedEnvironment gets the isolatedHostName by setting it to the IP of the first and only node in the boostrap cluster. Labels the node with +// configureIsolatedEnvironment gets the internal by setting it to the IP of the first and only node in the boostrap cluster. Labels the node with // "ingress-ready" so that the nginx ingress controller can pick it up, required by kind. See: https://kind.sigs.k8s.io/docs/user/ingress/#create-cluster -func configureIsolatedEnvironment(ctx context.Context, clusterProxy framework.ClusterProxy) string { +// This hostname can be used in an environment where the cluster is isolated from the outside world and a Rancher hostname is required. +func getInternalClusterHostname(ctx context.Context, clusterProxy framework.ClusterProxy) string { cpNodeList := corev1.NodeList{} Expect(clusterProxy.GetClient().List(ctx, &cpNodeList)).To(Succeed()) Expect(cpNodeList.Items).To(HaveLen(1))