From 705dc1b47f962e07211a4bb2dd6054448f055a56 Mon Sep 17 00:00:00 2001 From: Kyle Squizzato Date: Tue, 10 Sep 2024 10:43:53 -0700 Subject: [PATCH] Add docs on running tests, do not wait for all providers in hosted test Signed-off-by: Kyle Squizzato --- docs/aws/hosted-control-plane.md | 30 ++++++++++++++++++++++++++++-- docs/dev.md | 23 +++++++++++++++++++++++ test/e2e/controller.go | 19 +++++++++++++------ test/e2e/e2e_test.go | 9 +++++++-- 4 files changed, 71 insertions(+), 10 deletions(-) diff --git a/docs/aws/hosted-control-plane.md b/docs/aws/hosted-control-plane.md index d492d7165..19ea7919c 100644 --- a/docs/aws/hosted-control-plane.md +++ b/docs/aws/hosted-control-plane.md @@ -19,7 +19,12 @@ reused with a management cluster. If you deployed your AWS Kubernetes cluster using Cluster API Provider AWS (CAPA) you can obtain all the necessary data with the commands below or use the template found below in the -[HMC ManagedCluster manifest generation](#hmc-managed-cluster-manifest-generation) section. +[HMC ManagedCluster manifest +generation](#hmc-managed-cluster-manifest-generation) section. + +If using the `aws-standalone-cp` template to deploy a hosted cluster it is +recommended to use a `t3.large` or larger instance type as the `hmc-controller` +and other provider controllers will need a large amount of resources to run. **VPC ID** @@ -89,7 +94,7 @@ Grab the following `ManagedCluster` manifest template and save it to a file name apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: aws-hosted-cp + name: aws-hosted spec: template: aws-hosted-cp config: @@ -109,3 +114,24 @@ Then run the following command to create the `managedcluster.yaml`: ``` kubectl get awscluster cluster -o go-template="$(cat managedcluster.yaml.tpl)" > managedcluster.yaml ``` +## Deployment Tips +* Ensure HMC templates and the controller image are somewhere public and + fetchable. 
+* For installing the HMC charts and templates from a custom repository, load + the `kubeconfig` from the cluster and run the commands: + +``` +KUBECONFIG=kubeconfig IMG="ghcr.io/mirantis/hmc/controller-ci:v0.0.1-179-ga5bdf29" REGISTRY_REPO="oci://ghcr.io/mirantis/hmc/charts-ci" make dev-apply +KUBECONFIG=kubeconfig make dev-templates +``` +* The infrastructure will need to manually be marked `Ready` to get the + `MachineDeployment` to scale up. You can patch the `AWSCluster` kind using + the command: + +``` +KUBECONFIG=kubeconfig kubectl patch AWSCluster --type=merge --subresource status --patch 'status: {ready: true}' -n hmc-system +``` + +For additional information on why this is required [click here](https://docs.k0smotron.io/stable/capi-aws/#:~:text=As%20we%20are%20using%20self%2Dmanaged%20infrastructure%20we%20need%20to%20manually%20mark%20the%20infrastructure%20ready.%20This%20can%20be%20accomplished%20using%20the%20following%20command). + + diff --git a/docs/dev.md b/docs/dev.md index 848959399..488bf3031 100644 --- a/docs/dev.md +++ b/docs/dev.md @@ -83,3 +83,26 @@ export KUBECONFIG=~/.kube/config kubectl --kubeconfig ~/.kube/config get secret -n hmc-system -kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig ``` +## Running E2E tests locally +E2E tests can be run locally via the `make test-e2e` target. In order to have +CI deploy properly, a non-local registry will need to be used and the Helm charts +and hmc-controller image will need to exist on the registry, for example, using +GHCR: + +``` +IMG="ghcr.io/mirantis/hmc/controller-ci:v0.0.1-179-ga5bdf29" \ + REGISTRY_REPO="oci://ghcr.io/mirantis/hmc/charts-ci" \ + make test-e2e +``` + +Optionally, the `NO_CLEANUP=1` env var can be used to disable `After` nodes from +running within some specs; this will allow users to debug tests by re-running +them without the need to wait a while for an infrastructure deployment to occur. 
+For subsequent runs the `MANAGED_CLUSTER_NAME=` env var should be +passed to tell the test what cluster name to use so that it does not try to +generate a new name and deploy a new cluster. + +Tests that run locally use autogenerated names like `12345678-e2e-test` while +tests that run in CI use names such as `ci-1234567890-e2e-test`. You can always +pass `MANAGED_CLUSTER_NAME=` from the get-go to customize the name used by the +test. diff --git a/test/e2e/controller.go b/test/e2e/controller.go index ac44c223d..fd8b12f55 100644 --- a/test/e2e/controller.go +++ b/test/e2e/controller.go @@ -14,16 +14,23 @@ const ( hmcControllerLabel = "app.kubernetes.io/name=hmc" ) -func verifyControllersUp(kc *kubeclient.KubeClient) error { +// verifyControllersUp validates that controllers for the given providers list +// are running and ready. Optionally specify providers to check for rather than +// waiting for all providers to be ready. +func verifyControllersUp(kc *kubeclient.KubeClient, providers ...managedcluster.ProviderType) error { if err := validateController(kc, hmcControllerLabel, "hmc-controller-manager"); err != nil { return err } - for _, provider := range []managedcluster.ProviderType{ - managedcluster.ProviderCAPI, - managedcluster.ProviderAWS, - managedcluster.ProviderAzure, - } { + if providers == nil { + providers = []managedcluster.ProviderType{ + managedcluster.ProviderCAPI, + managedcluster.ProviderAWS, + managedcluster.ProviderAzure, + } + } + + for _, provider := range providers { // Ensure only one controller pod is running. if err := validateController(kc, managedcluster.GetProviderLabel(provider), string(provider)); err != nil { return err diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 7bb55a83e..0daf97a62 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -22,6 +22,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "time" . 
"github.com/onsi/ginkgo/v2" @@ -157,7 +158,7 @@ var _ = Describe("controller", Ordered, func() { templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") Eventually(func() error { - err := verifyControllersUp(standaloneClient) + err := verifyControllersUp(standaloneClient, managedcluster.ProviderCAPI, managedcluster.ProviderAWS) if err != nil { _, _ = fmt.Fprintf( GinkgoWriter, "[%s] controller validation failed: %v\n", @@ -233,7 +234,7 @@ func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, provider if err != nil { utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err)) } else { - host = hostURL.Host + host = strings.ReplaceAll(hostURL.Host, ":", "_") } for _, providerType := range providerTypes { @@ -287,5 +288,9 @@ func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, provider func noCleanup() bool { noCleanup := os.Getenv(managedcluster.EnvVarNoCleanup) + if noCleanup != "" { + By(fmt.Sprintf("skipping After nodes as %s is set", managedcluster.EnvVarNoCleanup)) + } + return noCleanup != "" }