From 8245cd085f188da5ceaf6c6620178b6ec85e61ea Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 25 Jan 2024 16:18:51 +0100 Subject: [PATCH] vcsim server --- Makefile | 26 + internal/test/helpers/vcsim/builder.go | 32 +- pkg/util/supervisor.go | 27 + test/framework/vmoperator/doc.go | 18 + test/framework/vmoperator/vmoperator.go | 576 ++++++++++ test/framework/vmoperator/vmoperator_test.go | 108 ++ test/go.mod | 77 +- test/go.sum | 275 ++--- .../tmp-to-be-deleted/README.md | 1 + .../api/v1alpha1/etcdcluster_annotations.go | 42 + .../api/v1alpha1/groupversion_info.go | 36 + .../api/v1alpha1/machine_types.go | 56 + .../api/v1alpha1/zz_generated.deepcopy.go | 114 ++ .../tmp-to-be-deleted/runtime/alias.go | 39 + .../tmp-to-be-deleted/runtime/cache/cache.go | 196 ++++ .../runtime/cache/cache_test.go | 136 +++ .../tmp-to-be-deleted/runtime/cache/client.go | 446 ++++++++ .../runtime/cache/client_test.go | 808 +++++++++++++ .../tmp-to-be-deleted/runtime/cache/doc.go | 25 + .../tmp-to-be-deleted/runtime/cache/gc.go | 114 ++ .../runtime/cache/gc_test.go | 85 ++ .../tmp-to-be-deleted/runtime/cache/hooks.go | 89 ++ .../runtime/cache/informer.go | 128 +++ .../tmp-to-be-deleted/runtime/cache/sync.go | 196 ++++ .../runtime/cache/sync_test.go | 85 ++ .../runtime/client/client.go | 53 + .../tmp-to-be-deleted/runtime/client/doc.go | 23 + .../tmp-to-be-deleted/runtime/doc.go | 31 + .../tmp-to-be-deleted/runtime/manager/doc.go | 22 + .../runtime/manager/manager.go | 102 ++ .../resourcegroup/cached_resourcegroup.go | 79 ++ .../runtime/resourcegroup/doc.go | 20 + .../runtime/resourcegroup/resourcegroup.go | 26 + .../tmp-to-be-deleted/server/api/const.go | 301 +++++ .../tmp-to-be-deleted/server/api/debug.go | 73 ++ .../tmp-to-be-deleted/server/api/doc.go | 22 + .../tmp-to-be-deleted/server/api/handler.go | 674 +++++++++++ .../tmp-to-be-deleted/server/api/metrics.go | 54 + .../server/api/portforward/doc.go | 23 + .../server/api/portforward/httpstreams.go | 321 ++++++ .../tmp-to-be-deleted/server/api/watch.go | 186 +++ .../tmp-to-be-deleted/server/certs.go | 101 ++ .../tmp-to-be-deleted/server/doc.go | 40 + .../tmp-to-be-deleted/server/etcd/doc.go | 22 + .../tmp-to-be-deleted/server/etcd/handler.go | 358 ++++++ .../server/etcd/handler_test.go | 121 ++ .../tmp-to-be-deleted/server/etcd/metrics.go | 41 + .../tmp-to-be-deleted/server/listener.go | 138 +++ .../tmp-to-be-deleted/server/mux.go | 586 ++++++++++ .../tmp-to-be-deleted/server/mux_test.go | 574 ++++++++++ .../tmp-to-be-deleted/server/proxy/addr.go | 59 + .../tmp-to-be-deleted/server/proxy/conn.go | 87 ++ .../tmp-to-be-deleted/server/proxy/dial.go | 158 +++ .../tmp-to-be-deleted/server/proxy/doc.go | 25 + .../tmp-to-be-deleted/server/proxy/proxy.go | 46 + test/infrastructure/vcsim/Dockerfile | 80 ++ test/infrastructure/vcsim/README.md | 4 + .../vcsim/api/v1alpha1/.import-restrictions | 5 + .../v1alpha1/controlplaneendpoint_types.go | 68 ++ test/infrastructure/vcsim/api/v1alpha1/doc.go | 20 + .../vcsim/api/v1alpha1/envvar_types.go | 96 ++ .../vcsim/api/v1alpha1/groupversion_info.go | 51 + .../vcsim/api/v1alpha1/vcsim_types.go | 118 ++ .../api/v1alpha1/zz_generated.deepcopy.go | 395 +++++++ .../vcsim/config/certmanager/certificate.yaml | 24 + .../config/certmanager/kustomization.yaml | 5 + .../config/certmanager/kustomizeconfig.yaml | 19 + ...luster.x-k8s.io_controlplaneendpoints.yaml | 57 + ...frastructure.cluster.x-k8s.io_envvars.yaml | 98 ++ ...re.cluster.x-k8s.io_vcentersimulators.yaml | 104 ++ .../vcsim/config/crd/kustomization.yaml | 26 + 
.../vcsim/config/crd/kustomizeconfig.yaml | 17 + .../cainjection_in_controlplaneendpoints.yaml | 8 + .../crd/patches/cainjection_in_envvars.yaml | 8 + .../cainjection_in_vcentersimulators.yaml | 8 + .../vcsim/config/default/kustomization.yaml | 55 + .../vcsim/config/default/kustomizeconfig.yaml | 4 + .../config/default/manager_image_patch.yaml | 11 + .../config/default/manager_pull_policy.yaml | 11 + .../config/default/manager_webhook_patch.yaml | 23 + .../vcsim/config/default/namespace.yaml | 6 + .../vcsim/config/manager/kustomization.yaml | 2 + .../vcsim/config/manager/manager.yaml | 65 ++ .../vcsim/config/rbac/kustomization.yaml | 6 + .../vcsim/config/rbac/role.yaml | 234 ++++ .../vcsim/config/rbac/role_binding.yaml | 12 + .../vcsim/config/rbac/service_account.yaml | 5 + .../vcsim/config/webhook/kustomization.yaml | 5 + .../vcsim/config/webhook/kustomizeconfig.yaml | 25 + .../vcsim/config/webhook/service.yaml | 9 + .../controlplaneendpoint_controller.go | 148 +++ .../controlplaneendpoint_controller_test.go | 113 ++ test/infrastructure/vcsim/controllers/doc.go | 18 + .../vcsim/controllers/envvar_controller.go | 250 ++++ .../vcsim/controllers/images/images.go | 28 + .../images/ttylinux-pc_i486-16.1.ovf | 94 ++ .../infrastructure/vcsim/controllers/vcsim.go | 102 ++ .../vcsim/controllers/vcsim_controller.go | 361 ++++++ .../controllers/vcsim_controller_test.go | 117 ++ .../vcsim/controllers/vcsim_test.go | 44 + .../controllers/virtualmachine_controller.go | 325 ++++++ .../virtualmachine_controller_test.go | 276 +++++ .../controllers/vmbootstrap_controller.go | 1008 +++++++++++++++++ .../vcsim/controllers/vmip_controller.go | 140 +++ .../vcsim/controllers/vspherevm_controller.go | 305 +++++ .../controllers/vspherevm_controller_test.go | 303 +++++ test/infrastructure/vcsim/main.go | 410 +++++++ test/infrastructure/vcsim/scripts/vcsim.sh | 100 ++ test/infrastructure/vcsim/tilt-provider.json | 14 + tilt-provider.json | 55 +- 110 files changed, 13789 insertions(+), 207 deletions(-) create mode 100644 test/framework/vmoperator/doc.go create mode 100644 test/framework/vmoperator/vmoperator.go create mode 100644 test/framework/vmoperator/vmoperator_test.go create mode 100644 test/infrastructure/tmp-to-be-deleted/README.md create mode 100644 test/infrastructure/tmp-to-be-deleted/api/v1alpha1/etcdcluster_annotations.go create mode 100644 test/infrastructure/tmp-to-be-deleted/api/v1alpha1/groupversion_info.go create mode 100644 test/infrastructure/tmp-to-be-deleted/api/v1alpha1/machine_types.go create mode 100644 test/infrastructure/tmp-to-be-deleted/api/v1alpha1/zz_generated.deepcopy.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/alias.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/cache.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/cache_test.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/client.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/client_test.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/gc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/gc_test.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/hooks.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/informer.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/cache/sync.go create mode 100644 
test/infrastructure/tmp-to-be-deleted/runtime/cache/sync_test.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/client/client.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/client/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/manager/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/manager/manager.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/cached_resourcegroup.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/resourcegroup.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/const.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/debug.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/handler.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/metrics.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/portforward/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/portforward/httpstreams.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/api/watch.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/certs.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/etcd/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/etcd/handler.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/etcd/handler_test.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/etcd/metrics.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/listener.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/mux.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/mux_test.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/proxy/addr.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/proxy/conn.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/proxy/dial.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/proxy/doc.go create mode 100644 test/infrastructure/tmp-to-be-deleted/server/proxy/proxy.go create mode 100644 test/infrastructure/vcsim/Dockerfile create mode 100644 test/infrastructure/vcsim/README.md create mode 100644 test/infrastructure/vcsim/api/v1alpha1/.import-restrictions create mode 100644 test/infrastructure/vcsim/api/v1alpha1/controlplaneendpoint_types.go create mode 100644 test/infrastructure/vcsim/api/v1alpha1/doc.go create mode 100644 test/infrastructure/vcsim/api/v1alpha1/envvar_types.go create mode 100644 test/infrastructure/vcsim/api/v1alpha1/groupversion_info.go create mode 100644 test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go create mode 100644 test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go create mode 100644 test/infrastructure/vcsim/config/certmanager/certificate.yaml create mode 100644 test/infrastructure/vcsim/config/certmanager/kustomization.yaml create mode 100644 test/infrastructure/vcsim/config/certmanager/kustomizeconfig.yaml create mode 100644 test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml create mode 100644 
test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml create mode 100644 test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml create mode 100644 test/infrastructure/vcsim/config/crd/kustomization.yaml create mode 100644 test/infrastructure/vcsim/config/crd/kustomizeconfig.yaml create mode 100644 test/infrastructure/vcsim/config/crd/patches/cainjection_in_controlplaneendpoints.yaml create mode 100644 test/infrastructure/vcsim/config/crd/patches/cainjection_in_envvars.yaml create mode 100644 test/infrastructure/vcsim/config/crd/patches/cainjection_in_vcentersimulators.yaml create mode 100644 test/infrastructure/vcsim/config/default/kustomization.yaml create mode 100644 test/infrastructure/vcsim/config/default/kustomizeconfig.yaml create mode 100644 test/infrastructure/vcsim/config/default/manager_image_patch.yaml create mode 100644 test/infrastructure/vcsim/config/default/manager_pull_policy.yaml create mode 100644 test/infrastructure/vcsim/config/default/manager_webhook_patch.yaml create mode 100644 test/infrastructure/vcsim/config/default/namespace.yaml create mode 100644 test/infrastructure/vcsim/config/manager/kustomization.yaml create mode 100644 test/infrastructure/vcsim/config/manager/manager.yaml create mode 100644 test/infrastructure/vcsim/config/rbac/kustomization.yaml create mode 100644 test/infrastructure/vcsim/config/rbac/role.yaml create mode 100644 test/infrastructure/vcsim/config/rbac/role_binding.yaml create mode 100644 test/infrastructure/vcsim/config/rbac/service_account.yaml create mode 100644 test/infrastructure/vcsim/config/webhook/kustomization.yaml create mode 100644 test/infrastructure/vcsim/config/webhook/kustomizeconfig.yaml create mode 100644 test/infrastructure/vcsim/config/webhook/service.yaml create mode 100644 test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go create mode 100644 test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go create mode 100644 test/infrastructure/vcsim/controllers/doc.go create mode 100644 test/infrastructure/vcsim/controllers/envvar_controller.go create mode 100644 test/infrastructure/vcsim/controllers/images/images.go create mode 100644 test/infrastructure/vcsim/controllers/images/ttylinux-pc_i486-16.1.ovf create mode 100644 test/infrastructure/vcsim/controllers/vcsim.go create mode 100644 test/infrastructure/vcsim/controllers/vcsim_controller.go create mode 100644 test/infrastructure/vcsim/controllers/vcsim_controller_test.go create mode 100644 test/infrastructure/vcsim/controllers/vcsim_test.go create mode 100644 test/infrastructure/vcsim/controllers/virtualmachine_controller.go create mode 100644 test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go create mode 100644 test/infrastructure/vcsim/controllers/vmbootstrap_controller.go create mode 100644 test/infrastructure/vcsim/controllers/vmip_controller.go create mode 100644 test/infrastructure/vcsim/controllers/vspherevm_controller.go create mode 100644 test/infrastructure/vcsim/controllers/vspherevm_controller_test.go create mode 100644 test/infrastructure/vcsim/main.go create mode 100755 test/infrastructure/vcsim/scripts/vcsim.sh create mode 100644 test/infrastructure/vcsim/tilt-provider.json diff --git a/Makefile b/Makefile index d32da6b72a..90b0290ad5 100644 --- a/Makefile +++ b/Makefile @@ -208,6 +208,10 @@ STAGING_BUCKET ?= artifacts.k8s-staging-capi-vsphere.appspot.com IMAGE_NAME ?= cluster-api-vsphere-controller 
 CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME)
 
+# vcsim controller
+VCSIM_IMAGE_NAME ?= cluster-api-vcsim-controller
+VCSIM_CONTROLLER_IMG ?= $(REGISTRY)/$(VCSIM_IMAGE_NAME)
+
 # It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
 TAG ?= dev
@@ -233,8 +237,10 @@ MANIFEST_ROOT ?= ./config
 CRD_ROOT ?= $(MANIFEST_ROOT)/default/crd/bases
 SUPERVISOR_CRD_ROOT ?= $(MANIFEST_ROOT)/supervisor/crd
 VMOP_CRD_ROOT ?= $(MANIFEST_ROOT)/deployments/integration-tests/crds
+VCSIM_CRD_ROOT ?= test/infrastructure/vcsim/config/crd/bases
 WEBHOOK_ROOT ?= $(MANIFEST_ROOT)/webhook
 RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac
+VCSIM_RBAC_ROOT ?= test/infrastructure/vcsim/config/rbac
 
 VERSION ?= $(shell cat clusterctl-settings.json | jq .config.nextVersion -r)
 OVERRIDES_DIR := $(HOME)/.cluster-api/overrides/infrastructure-vsphere/$(VERSION)
@@ -275,6 +281,15 @@ generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
 		paths=github.com/vmware-tanzu/vm-operator/api/v1alpha1/... \
 		crd:crdVersions=v1 \
 		output:crd:dir=$(VMOP_CRD_ROOT)
+	# vcsim crds are used for tests.
+	$(CONTROLLER_GEN) \
+		paths=./test/infrastructure/vcsim/api/v1alpha1 \
+		crd:crdVersions=v1 \
+		output:crd:dir=$(VCSIM_CRD_ROOT)
+	$(CONTROLLER_GEN) \
+		paths=./test/infrastructure/vcsim/controllers/... \
+		output:rbac:dir=$(VCSIM_RBAC_ROOT) \
+		rbac:roleName=manager-role
 
 .PHONY: generate-go-deepcopy
 generate-go-deepcopy: $(CONTROLLER_GEN) ## Generate deepcopy go code for core
@@ -282,6 +297,9 @@ generate-go-deepcopy: $(CONTROLLER_GEN) ## Generate deepcopy go code for core
 	$(CONTROLLER_GEN) \
 		object:headerFile=./hack/boilerplate/boilerplate.generatego.txt \
 		paths=./apis/...
+	$(CONTROLLER_GEN) \
+		object:headerFile=./hack/boilerplate/boilerplate.generatego.txt \
+		paths=./test/infrastructure/vcsim/api/...
 
 .PHONY: generate-modules
 generate-modules: ## Run go mod tidy to ensure modules are up to date
@@ -470,6 +488,14 @@ docker-build: docker-pull-prerequisites ## Build the docker image for vsphere co
 	$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/base/manager_pull_policy.yaml"; \
 	fi
 
+.PHONY: docker-build-vcsim
+docker-build-vcsim: docker-pull-prerequisites ## Build the docker image for vcsim controller manager
+	DOCKER_BUILDKIT=1 docker build --platform linux/$(ARCH) --build-arg GOLANG_VERSION=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ldflags="$(LDFLAGS)" . -t $(VCSIM_CONTROLLER_IMG)-$(ARCH):$(TAG)
+	@if [ "${DOCKER_BUILD_MODIFY_MANIFESTS}" = "true" ]; then \
+		$(MAKE) set-manifest-image MANIFEST_IMG=$(VCSIM_CONTROLLER_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./test/infrastructure/vcsim/config/default/manager_image_patch.yaml"; \
+		$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./test/infrastructure/vcsim/config/default/manager_pull_policy.yaml"; \
+	fi
+
 ## --------------------------------------
 ## Testing
 ## --------------------------------------
diff --git a/internal/test/helpers/vcsim/builder.go b/internal/test/helpers/vcsim/builder.go
index 2ad0143cce..65fdcb7c8c 100644
--- a/internal/test/helpers/vcsim/builder.go
+++ b/internal/test/helpers/vcsim/builder.go
@@ -19,6 +19,7 @@ package vcsim
 import (
 	"crypto/tls"
 	"fmt"
+	"net/url"
 	"os"
 	"os/exec"
 	"strings"
@@ -30,8 +31,10 @@ import (
 
 // Builder helps in creating a vcsim simulator.
 type Builder struct {
-	model      *simulator.Model
-	operations []string
+	skipModelCreate bool
+	url             *url.URL
+	model           *simulator.Model
+	operations      []string
 }
 
 // NewBuilder returns a new Builder.
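For orientation, a minimal sketch of how the new builder options introduced in the hunks below are meant to combine with the existing builder (the function name and listen address here are illustrative assumptions, not part of the patch):

package vcsim_test // hypothetical location; the internal package is only importable from within this module

import (
	"net/url"

	"github.com/vmware/govmomi/simulator"

	"sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim"
)

// buildWithPreCreatedModel creates (and could further customize) the model
// up front, then hands it to the builder with SkipModelCreate so Build does
// not create it a second time, and pins the listen address with WithURL.
func buildWithPreCreatedModel() (*vcsim.Simulator, error) {
	model := simulator.VPX()
	if err := model.Create(); err != nil {
		return nil, err
	}

	listenURL, err := url.Parse("https://127.0.0.1:8989/sdk") // illustrative address
	if err != nil {
		return nil, err
	}

	return vcsim.NewBuilder().
		WithModel(model).
		SkipModelCreate().
		WithURL(listenURL).
		Build()
}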
@@ -45,6 +48,19 @@ func (b *Builder) WithModel(model *simulator.Model) *Builder {
 	return b
 }
 
+// SkipModelCreate tells the builder to skip creating the model, because it has already been created
+// before being passed to WithModel.
+func (b *Builder) SkipModelCreate() *Builder {
+	b.skipModelCreate = true
+	return b
+}
+
+// WithURL defines the url to be used for service listening.
+func (b *Builder) WithURL(url *url.URL) *Builder {
+	b.url = url
+	return b
+}
+
 // WithOperations defines the operations that the Builder should execute on the newly created vcsim instance.
 func (b *Builder) WithOperations(ops ...string) *Builder {
 	b.operations = append(b.operations, ops...)
@@ -53,9 +69,15 @@ func (b *Builder) WithOperations(ops ...string) *Builder {
 
 // Build the vcsim instance.
 func (b *Builder) Build() (*Simulator, error) {
-	err := b.model.Create()
-	if err != nil {
-		return nil, err
+	if !b.skipModelCreate {
+		err := b.model.Create()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if b.url != nil {
+		b.model.Service.Listen = b.url
 	}
 
 	b.model.Service.TLS = new(tls.Config)
diff --git a/pkg/util/supervisor.go b/pkg/util/supervisor.go
index fcff06ee3b..3da59197ba 100644
--- a/pkg/util/supervisor.go
+++ b/pkg/util/supervisor.go
@@ -17,13 +17,17 @@ limitations under the License.
 package util
 
 import (
+	"context"
 	"fmt"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
 )
 
 // SetControllerReferenceWithOverride sets owner as a Controller OwnerReference on controlled.
@@ -82,3 +86,26 @@ func referSameObject(a, b metav1.OwnerReference) bool {
 
 	return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name
 }
+
+// GetOwnerVMWareMachine returns the vmwarev1.VSphereMachine owner for the passed object.
+func GetOwnerVMWareMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*vmwarev1.VSphereMachine, error) {
+	for _, ref := range obj.OwnerReferences {
+		gv, err := schema.ParseGroupVersion(ref.APIVersion)
+		if err != nil {
+			return nil, err
+		}
+		if ref.Kind == "VSphereMachine" && gv.Group == vmwarev1.GroupVersion.Group {
+			return getVMWareMachineByName(ctx, c, obj.Namespace, ref.Name)
+		}
+	}
+	return nil, nil
+}
+
+func getVMWareMachineByName(ctx context.Context, c client.Client, namespace, name string) (*vmwarev1.VSphereMachine, error) {
+	m := &vmwarev1.VSphereMachine{}
+	key := client.ObjectKey{Name: name, Namespace: namespace}
+	if err := c.Get(ctx, key, m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
diff --git a/test/framework/vmoperator/doc.go b/test/framework/vmoperator/doc.go
new file mode 100644
index 0000000000..f5fb66e584
--- /dev/null
+++ b/test/framework/vmoperator/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package vmoperator contains utils to run tests with the vm-operator in standalone mode.
+package vmoperator
diff --git a/test/framework/vmoperator/vmoperator.go b/test/framework/vmoperator/vmoperator.go
new file mode 100644
index 0000000000..80933c407b
--- /dev/null
+++ b/test/framework/vmoperator/vmoperator.go
@@ -0,0 +1,576 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vmoperator
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/pkg/errors"
+	vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
+	topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1"
+	"github.com/vmware/govmomi/pbm"
+	"github.com/vmware/govmomi/vapi/library"
+	"github.com/vmware/govmomi/vapi/rest"
+	"github.com/vmware/govmomi/vim25/soap"
+	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	"sigs.k8s.io/cluster-api-provider-vsphere/packaging/flavorgen/flavors/util"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
+)
+
+const DefaultNamespace = "vmware-system-vmop"
+
+const (
+	// NOTE: the consts below are copied from pkg/vmprovider/providers/vsphere/config/config.go
+	// in the vm-operator project.
+
+	providerConfigMapName    = "vsphere.provider.config.vmoperator.vmware.com"
+	vcPNIDKey                = "VcPNID"
+	vcPortKey                = "VcPort"
+	vcCredsSecretNameKey     = "VcCredsSecretName" //nolint:gosec
+	datacenterKey            = "Datacenter"
+	resourcePoolKey          = "ResourcePool"
+	folderKey                = "Folder"
+	datastoreKey             = "Datastore"
+	networkNameKey           = "Network"
+	scRequiredKey            = "StorageClassRequired"
+	useInventoryKey          = "UseInventoryAsContentSource"
+	insecureSkipTLSVerifyKey = "InsecureSkipTLSVerify"
+	caFilePathKey            = "CAFilePath"
+)
+
+type ContentLibraryItemFilesConfig struct {
+	Name    string
+	Content []byte
+}
+
+type ContentLibraryItemConfig struct {
+	Name        string
+	Files       []ContentLibraryItemFilesConfig
+	ItemType    string
+	ProductInfo string
+	OSInfo      string
+}
+
+type ContentLibraryConfig struct {
+	Name      string
+	Datastore string
+	Item      ContentLibraryItemConfig
+}
+
+type VCenterClusterConfig struct {
+	ServerURL  string
+	Username   string
+	Password   string
+	Thumbprint string
+
+	// supervisor is based on a single vCenter cluster
+	Datacenter      string
+	Cluster         string
+	Folder          string
+	ResourcePool    string
+	StoragePolicyID string
+	ContentLibrary  ContentLibraryConfig
+}
+
+type UserNamespaceConfig struct {
+	Name         string
+	StorageClass string
+}
+
+// Dependencies models dependencies for the vm-operator.
+type Dependencies struct {
+	// This is the namespace where the vm-operator is deployed
+	Namespace string
+
+	// Info about the vCenter cluster the vm-operator is bound to
+	VCenterCluster VCenterClusterConfig
+
+	// Info about where the users are expected to store Cluster API clusters to be managed by the vm-operator
+	UserNamespace UserNamespaceConfig
+}
+
+// ReconcileDependencies reconciles dependencies for the vm-operator.
+// NOTE: This func is idempotent: it creates objects if missing, otherwise it uses existing ones
+// (this allows e.g. updating images once and re-using them across many test runs).
+func ReconcileDependencies(ctx context.Context, c client.Client, config Dependencies) error {
+	log := ctrl.LoggerFrom(ctx)
+	log.Info("Reconciling dependencies for the VMOperator Deployment")
+
+	// Get a Client to VCenter and get hold of the relevant objects that should already exist
+	params := session.NewParams().
+		WithServer(config.VCenterCluster.ServerURL).
+		WithThumbprint(config.VCenterCluster.Thumbprint).
+		WithUserInfo(config.VCenterCluster.Username, config.VCenterCluster.Password)
+
+	s, err := session.GetOrCreate(ctx, params)
+	if err != nil {
+		return errors.Wrapf(err, "failed to connect to vCenter Server instance to read dependency references")
+	}
+
+	datacenter, err := s.Finder.Datacenter(ctx, config.VCenterCluster.Datacenter)
+	if err != nil {
+		return errors.Wrapf(err, "failed to get datacenter %s", config.VCenterCluster.Datacenter)
+	}
+
+	cluster, err := s.Finder.ClusterComputeResource(ctx, config.VCenterCluster.Cluster)
+	if err != nil {
+		return errors.Wrapf(err, "failed to get cluster %s", config.VCenterCluster.Cluster)
+	}
+
+	folder, err := s.Finder.Folder(ctx, config.VCenterCluster.Folder)
+	if err != nil {
+		return errors.Wrapf(err, "failed to get folder %s", config.VCenterCluster.Folder)
+	}
+
+	resourcePool, err := s.Finder.ResourcePool(ctx, config.VCenterCluster.ResourcePool)
+	if err != nil {
+		return errors.Wrapf(err, "failed to get resourcePool %s", config.VCenterCluster.ResourcePool)
+	}
+
+	contentLibraryDatastore, err := s.Finder.Datastore(ctx, config.VCenterCluster.ContentLibrary.Datastore)
+	if err != nil {
+		return errors.Wrapf(err, "failed to get contentLibraryDatastore %s", config.VCenterCluster.ContentLibrary.Datastore)
+	}
+
+	pbmClient, err := pbm.NewClient(ctx, s.Client.Client)
+	if err != nil {
+		return errors.Wrap(err, "failed to get storage policy client")
+	}
+
+	storagePolicyID, err := pbmClient.ProfileIDByName(ctx, config.VCenterCluster.StoragePolicyID)
+	if err != nil {
+		return errors.Wrapf(err, "failed to get storage policy profile %s", config.VCenterCluster.StoragePolicyID)
+	}
+
+	// Create StorageClass & bind it to the user namespace via a ResourceQuota
+	// NOTE: vm-operator is using the ResourceQuota to figure out which StorageClass can be used from a namespace
+	// TODO: consider if we want to support more than one storage class
+
+	storageClass := &storagev1.StorageClass{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: config.UserNamespace.StorageClass,
+		},
+		Provisioner: "kubernetes.io/vsphere-volume",
+		Parameters: map[string]string{
+			"storagePolicyID": storagePolicyID,
+		},
+	}
+
+	if err := c.Get(ctx, client.ObjectKeyFromObject(storageClass), storageClass); err != nil {
+		if !apierrors.IsNotFound(err) {
+			return errors.Wrapf(err, "failed to get vm-operator StorageClass %s", storageClass.Name)
+		}
+
+		if err := c.Create(ctx, storageClass); err != nil {
+			return errors.Wrapf(err, "failed to create vm-operator StorageClass %s", storageClass.Name)
vm-operator StorageClass %s", storageClass.Name) + } + log.Info("Created vm-operator StorageClass", "StorageClass", klog.KObj(storageClass)) + } + + // TODO: rethink about this, for now we are creating a ResourceQuota with the same name of the StorageClass, might be this is not ok when hooking into a real vCenter + resourceQuota := &corev1.ResourceQuota{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: config.UserNamespace.StorageClass, + Namespace: config.UserNamespace.Name, + }, + Spec: corev1.ResourceQuotaSpec{ + Hard: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceName(fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.storage", storageClass.Name)): resource.MustParse("1Gi"), + }, + }, + } + + if err := c.Get(ctx, client.ObjectKeyFromObject(resourceQuota), resourceQuota); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator ResourceQuota %s", resourceQuota.Name) + } + + if err := c.Create(ctx, resourceQuota); err != nil { + return errors.Wrapf(err, "failed to create vm-operator ResourceQuota %s", resourceQuota.Name) + } + log.Info("Created vm-operator ResourceQuota", "ResourceQuota", klog.KObj(resourceQuota)) + } + + // Create Availability zones CR in K8s and bind them to the user namespace + // NOTE: For now we are creating one availability zone for the cluster as in the example cluster + // TODO: investigate what options exists to create availability zones, and if we want to support more + + availabilityZone := &topologyv1.AvailabilityZone{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.ReplaceAll(strings.ReplaceAll(strings.ToLower(strings.TrimPrefix(config.VCenterCluster.Cluster, "/")), "_", "-"), "/", "-"), + }, + Spec: topologyv1.AvailabilityZoneSpec{ + ClusterComputeResourceMoId: cluster.Reference().Value, + Namespaces: map[string]topologyv1.NamespaceInfo{ + config.UserNamespace.Name: { + PoolMoId: resourcePool.Reference().Value, + FolderMoId: folder.Reference().Value, + }, + }, + }, + } + + if err := c.Get(ctx, client.ObjectKeyFromObject(availabilityZone), availabilityZone); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get AvailabilityZone %s", availabilityZone.Name) + } + + if err := c.Create(ctx, availabilityZone); err != nil { + return errors.Wrapf(err, "failed to create AvailabilityZone %s", availabilityZone.Name) + } + log.Info("Created vm-operator AvailabilityZone", "AvailabilityZone", klog.KObj(availabilityZone)) + } + + // Create vm-operator Secret in K8s + // This secret contains credentials to access vCenter the vm-operator acts on. + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: providerConfigMapName, // using the same name of the config map for consistency. + Namespace: config.Namespace, + }, + Data: map[string][]byte{ + "username": []byte(config.VCenterCluster.Username), + "password": []byte(config.VCenterCluster.Password), + }, + Type: corev1.SecretTypeOpaque, + } + if err := c.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator Secret %s", secret.Name) + } + if err := c.Create(ctx, secret); err != nil { + return errors.Wrapf(err, "failed to create vm-operator Secret %s", secret.Name) + } + log.Info("Created vm-operator Secret", "Secret", klog.KObj(secret)) + } + + // Create vm-operator ConfigMap in K8s + // This ConfigMap contains settings for the vm-operator instance. 
+ + host, port, err := net.SplitHostPort(config.VCenterCluster.ServerURL) + if err != nil { + return errors.Wrapf(err, "failed to split host %s", config.VCenterCluster.ServerURL) + } + + providerConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: providerConfigMapName, + Namespace: config.Namespace, + }, + Data: map[string]string{ + caFilePathKey: "", // Leaving this empty because we don't have (yet) a solution to inject a CA file into the vm-operator pod. + datastoreKey: "", // It seems it is ok to leave it empty. + datacenterKey: datacenter.Reference().Value, + folderKey: folder.Reference().Value, + insecureSkipTLSVerifyKey: "true", // Using this given that we don't have (yet) a solution to inject a CA file into the vm-operator pod. + networkNameKey: "", // It seems it is ok to leave it empty. + resourcePoolKey: resourcePool.Reference().Value, + scRequiredKey: "true", + useInventoryKey: "false", + vcCredsSecretNameKey: secret.Name, + vcPNIDKey: host, + vcPortKey: port, + }, + } + if err := c.Get(ctx, client.ObjectKeyFromObject(providerConfigMap), providerConfigMap); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator ConfigMap %s", providerConfigMap.Name) + } + if err := c.Create(ctx, providerConfigMap); err != nil { + return errors.Wrapf(err, "failed to create vm-operator ConfigMap %s", providerConfigMap.Name) + } + log.Info("Created vm-operator ConfigMap", "ConfigMap", klog.KObj(providerConfigMap)) + } + + // Create VirtualMachineClass in K8s and bind it to the user namespace + // TODO: figure out if to add more vm classes / if to make them configurable via config + vmClass := &vmoprv1.VirtualMachineClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "best-effort-2xlarge", + }, + Spec: vmoprv1.VirtualMachineClassSpec{ + Hardware: vmoprv1.VirtualMachineClassHardware{ + Cpus: 8, + Memory: resource.MustParse("64G"), + }, + }, + } + if err := c.Get(ctx, client.ObjectKeyFromObject(vmClass), vmClass); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator VirtualMachineClass %s", vmClass.Name) + } + if err := c.Create(ctx, vmClass); err != nil { + return errors.Wrapf(err, "failed to create vm-operator VirtualMachineClass %s", vmClass.Name) + } + log.Info("Created vm-operator VirtualMachineClass", "VirtualMachineClass", klog.KObj(vmClass)) + } + + vmClassBinding := &vmoprv1.VirtualMachineClassBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: vmClass.Name, + Namespace: config.UserNamespace.Name, + }, + ClassRef: vmoprv1.ClassReference{ + APIVersion: vmoprv1.SchemeGroupVersion.String(), + Kind: util.TypeToKind(&vmoprv1.VirtualMachineClass{}), + Name: vmClass.Name, + }, + } + if err := c.Get(ctx, client.ObjectKeyFromObject(vmClassBinding), vmClassBinding); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator VirtualMachineClassBinding %s", vmClassBinding.Name) + } + if err := c.Create(ctx, vmClassBinding); err != nil { + return errors.Wrapf(err, "failed to create vm-operator VirtualMachineClassBinding %s", vmClassBinding.Name) + } + log.Info("Created vm-operator VirtualMachineClassBinding", "VirtualMachineClassBinding", klog.KObj(vmClassBinding)) + } + + // Create a ContentLibrary in K8s and in vCenter, bind it to the K8s namespace + // This requires a set of objects in vCenter(or vcsim) as well as their mapping in K8s + // - vCenter: a Library containing an Item + // - k8s: ContentLibraryProvider, ContentSource (both representing the 
library), a VirtualMachineImage (representing the Item) + + restClient := rest.NewClient(s.Client.Client) + if err := restClient.Login(ctx, url.UserPassword(config.VCenterCluster.Username, config.VCenterCluster.Password)); err != nil { + return errors.Wrap(err, "failed to login using the rest client") + } + + libMgr := library.NewManager(restClient) + + contentLibrary := library.Library{ + Name: config.VCenterCluster.ContentLibrary.Name, + Type: "LOCAL", + Storage: []library.StorageBackings{ + { + DatastoreID: contentLibraryDatastore.Reference().Value, + Type: "DATASTORE", + }, + }, + } + libraries, err := libMgr.GetLibraries(ctx) + if err != nil { + return errors.Wrap(err, "failed to get ContentLibraries") + } + + var contentLibraryID string + if len(libraries) > 0 { + for i := range libraries { + if libraries[i].Name == contentLibrary.Name { + contentLibraryID = libraries[i].ID + break + } + } + } + + if contentLibraryID == "" { + id, err := libMgr.CreateLibrary(ctx, contentLibrary) + if err != nil { + return errors.Wrapf(err, "failed to create vm-operator ContentLibrary %s", contentLibrary.Name) + } + log.Info("Created vm-operator ContentLibrary in vCenter", "ContentLibrary", contentLibrary.Name) + contentLibraryID = id + } + + contentSource := &vmoprv1.ContentSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: contentLibraryID, + }, + Spec: vmoprv1.ContentSourceSpec{ + ProviderRef: vmoprv1.ContentProviderReference{ + Name: contentLibraryID, // NOTE: this should match the ContentLibraryProvider name below + Kind: "ContentLibraryProvider", + }, + }, + } + if err := c.Get(ctx, client.ObjectKeyFromObject(contentSource), contentSource); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator ContentSource %s", contentSource.Name) + } + if err := c.Create(ctx, contentSource); err != nil { + return errors.Wrapf(err, "failed to create vm-operator ContentSource %s", contentSource.Name) + } + log.Info("Created vm-operator ContentSource", "ContentSource", klog.KObj(contentSource)) + } + + contentSourceBinding := &vmoprv1.ContentSourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: contentLibraryID, + Namespace: config.UserNamespace.Name, + }, + ContentSourceRef: vmoprv1.ContentSourceReference{ + APIVersion: vmoprv1.SchemeGroupVersion.String(), + Kind: util.TypeToKind(&vmoprv1.ContentSource{}), + Name: contentSource.Name, + }, + } + if err := c.Get(ctx, client.ObjectKeyFromObject(contentSourceBinding), contentSourceBinding); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator ContentSourceBinding %s", contentSourceBinding.Name) + } + if err := c.Create(ctx, contentSourceBinding); err != nil { + return errors.Wrapf(err, "failed to create vm-operator ContentSourceBinding %s", contentSourceBinding.Name) + } + log.Info("Created vm-operator ContentSourceBinding", "ContentSourceBinding", klog.KObj(contentSourceBinding)) + } + + contentLibraryProvider := &vmoprv1.ContentLibraryProvider{ + ObjectMeta: metav1.ObjectMeta{ + Name: contentLibraryID, + }, + Spec: vmoprv1.ContentLibraryProviderSpec{ + UUID: contentLibraryID, + }, + } + + if err := controllerutil.SetOwnerReference(contentSource, contentLibraryProvider, c.Scheme()); err != nil { + return errors.Wrap(err, "failed to set ContentLibraryProvider owner") + } + if err := c.Get(ctx, client.ObjectKeyFromObject(contentSource), contentLibraryProvider); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator 
ContentLibraryProvider %s", contentLibraryProvider.Name) + } + if err := c.Create(ctx, contentLibraryProvider); err != nil { + return errors.Wrapf(err, "failed to create vm-operator ContentLibraryProvider %s", contentLibraryProvider.Name) + } + log.Info("Created vm-operator ContentLibraryProvider", "ContentSource", klog.KObj(contentSource), "ContentLibraryProvider", klog.KObj(contentLibraryProvider)) + } + + libraryItem := library.Item{ + Name: config.VCenterCluster.ContentLibrary.Item.Name, + Type: config.VCenterCluster.ContentLibrary.Item.ItemType, + LibraryID: contentLibraryID, + } + + items, err := libMgr.GetLibraryItems(ctx, contentLibraryID) + if err != nil { + return errors.Wrap(err, "failed to get ContentLibraryItems") + } + + var libraryItemID string + for _, item := range items { + if item.Name == libraryItem.Name { + libraryItemID = item.ID + break + } + } + + if libraryItemID == "" { + id, err := libMgr.CreateLibraryItem(ctx, libraryItem) + if err != nil { + return errors.Wrapf(err, "failed to create vm-operator ContentLibraryItem %s", libraryItem.Name) + } + log.Info("Created vm-operator LibraryItem in vCenter", "ContentLibrary", contentLibrary.Name, "LibraryItem", libraryItem.Name) + libraryItemID = id + } + + virtualMachineImage := &vmoprv1.VirtualMachineImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: libraryItem.Name, + }, + Spec: vmoprv1.VirtualMachineImageSpec{ + ProductInfo: vmoprv1.VirtualMachineImageProductInfo{ + FullVersion: config.VCenterCluster.ContentLibrary.Item.ProductInfo, + }, + OSInfo: vmoprv1.VirtualMachineImageOSInfo{ + Type: config.VCenterCluster.ContentLibrary.Item.OSInfo, + }, + }, + } + + if err := controllerutil.SetOwnerReference(contentLibraryProvider, virtualMachineImage, c.Scheme()); err != nil { + return errors.Wrap(err, "failed to set VirtualMachineImage owner") + } + if err := c.Get(ctx, client.ObjectKeyFromObject(virtualMachineImage), virtualMachineImage); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get vm-operator VirtualMachineImage %s", virtualMachineImage.Name) + } + if err := c.Create(ctx, virtualMachineImage); err != nil { + return errors.Wrapf(err, "failed to create vm-operator VirtualMachineImage %s", virtualMachineImage.Name) + } + log.Info("Created vm-operator VirtualMachineImage", "ContentSource", klog.KObj(contentSource), "ContentLibraryProvider", klog.KObj(contentLibraryProvider), "VirtualMachineImage", klog.KObj(virtualMachineImage)) + } + + existingFiles, err := libMgr.ListLibraryItemFiles(ctx, libraryItemID) + if err != nil { + return errors.Wrapf(err, "failed to list files for vm-operator libraryItem %s", libraryItem.Name) + } + + uploadFunc := func(sessionID, file string, content []byte) error { + info := library.UpdateFile{ + Name: file, + SourceType: "PUSH", + Size: int64(len(content)), + } + + update, err := libMgr.AddLibraryItemFile(ctx, sessionID, info) + if err != nil { + return err + } + + u, err := url.Parse(update.UploadEndpoint.URI) + if err != nil { + return err + } + + p := soap.DefaultUpload + p.ContentLength = info.Size + + return libMgr.Client.Upload(ctx, bytes.NewReader(content), u, &p) + } + + for _, file := range config.VCenterCluster.ContentLibrary.Item.Files { + exists := false + for _, existingFile := range existingFiles { + if file.Name == existingFile.Name { + exists = true + } + } + if exists { + continue + } + + sessionID, err := libMgr.CreateLibraryItemUpdateSession(ctx, library.Session{LibraryItemID: libraryItemID}) + if err != nil { + return errors.Wrapf(err, 
"failed to start update session for vm-operator libraryItem %s", libraryItem.Name) + } + if err := uploadFunc(sessionID, file.Name, file.Content); err != nil { + return errors.Wrapf(err, "failed to upload data for vm-operator libraryItem %s", libraryItem.Name) + } + if err := libMgr.CompleteLibraryItemUpdateSession(ctx, sessionID); err != nil { + return errors.Wrapf(err, "failed to complete update session for vm-operator libraryItem %s", libraryItem.Name) + } + log.Info("Uploaded vm-operator LibraryItemFile in vCenter", "ContentLibrary", contentLibrary.Name, "libraryItem", libraryItem.Name, "LibraryItemFile", file.Name) + } + return nil +} diff --git a/test/framework/vmoperator/vmoperator_test.go b/test/framework/vmoperator/vmoperator_test.go new file mode 100644 index 0000000000..94d3c61787 --- /dev/null +++ b/test/framework/vmoperator/vmoperator_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmoperator + +import ( + "context" + "os" + "testing" + "time" + + . "github.com/onsi/gomega" + vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" + topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" +) + +/* +cat << EOF > /tmp/testbed.yaml +ServerURL: "${VSPHERE_SERVER}:443" +Username: "${VSPHERE_USERNAME}" +Password: "${VSPHERE_PASSWORD}" +Thumbprint: "${VSPHERE_TLS_THUMBPRINT}" +Datacenter: "${VSPHERE_DATACENTER}" +Cluster: "/${VSPHERE_DATACENTER}/host/cluster0" +Folder: "${VSPHERE_FOLDER}" +ResourcePool: "/${VSPHERE_DATACENTER}/host/cluster0/Resources/${VSPHERE_RESOURCE_POOL}" +StoragePolicyID: "${VSPHERE_STORAGE_POLICY}" +ContentLibrary: + Name: "capv" + Datastore: "/${VSPHERE_DATACENTER}/datastore/${VSPHERE_DATASTORE}" +EOF. 
+*/ + +func Test_reconcileVMOperatorDeployment(t *testing.T) { + t.Skip() + + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + _ = storagev1.AddToScheme(scheme) + _ = vmoprv1.AddToScheme(scheme) + _ = vmwarev1.AddToScheme(scheme) + _ = topologyv1.AddToScheme(scheme) + + const ( + kubeconfigPath = "/tmp/capi-test.kubeconfig" + testbedYamlPath = "/tmp/testbed.yaml" + ) + g := NewWithT(t) + + ctx := context.Background() + + vcenterClusterConfig := VCenterClusterConfig{} + testbedData, err := os.ReadFile(testbedYamlPath) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(yaml.Unmarshal(testbedData, &vcenterClusterConfig)).ToNot(HaveOccurred()) + + config := Dependencies{ + Namespace: "vmware-system-vmop", + UserNamespace: UserNamespaceConfig{ + Name: "default", // namespace where we deploy a cluster + StorageClass: "tkg-shared-ds-sp", + }, + VCenterCluster: vcenterClusterConfig, + } + + config.VCenterCluster.ContentLibrary.Item = ContentLibraryItemConfig{ + Name: "ubuntu-2204-kube-v1.29.0", + } + + // create a config + + // Create a client.Client from a kubeconfig + kubeconfig, err := os.ReadFile(kubeconfigPath) + g.Expect(err).ToNot(HaveOccurred()) + + restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig) + g.Expect(err).ToNot(HaveOccurred()) + + restConfig.Timeout = 10 * time.Second + + c, err := client.New(restConfig, client.Options{Scheme: scheme}) + g.Expect(err).ToNot(HaveOccurred()) + + // reconcile + err = ReconcileDependencies(ctx, c, config) + g.Expect(err).ToNot(HaveOccurred()) +} diff --git a/test/go.mod b/test/go.mod index 173998d787..8fb4d36808 100644 --- a/test/go.mod +++ b/test/go.mod @@ -9,17 +9,30 @@ replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.6.1 replace sigs.k8s.io/cluster-api-provider-vsphere => ../ require ( + github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02 + github.com/emicklei/go-restful/v3 v3.11.2 + github.com/evanphx/json-patch/v5 v5.7.0 + github.com/go-logr/logr v1.4.1 github.com/google/uuid v1.6.0 github.com/onsi/ginkgo/v2 v2.15.0 github.com/onsi/gomega v1.31.1 github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.18.0 + github.com/spf13/pflag v1.0.5 github.com/vmware-tanzu/vm-operator/api v1.8.5 + github.com/vmware-tanzu/vm-operator/external/tanzu-topology v0.0.0-20231214185006-5477585eebfd github.com/vmware/govmomi v0.34.2 + go.etcd.io/etcd/api/v3 v3.5.11 + go.etcd.io/etcd/client/v3 v3.5.11 golang.org/x/crypto v0.18.0 + golang.org/x/net v0.20.0 + google.golang.org/grpc v1.59.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.28.6 k8s.io/apimachinery v0.28.6 + k8s.io/apiserver v0.28.6 k8s.io/client-go v0.28.6 + k8s.io/component-base v0.28.6 k8s.io/klog/v2 v2.110.1 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 @@ -27,6 +40,7 @@ require ( sigs.k8s.io/cluster-api/test v0.0.0-00010101000000-000000000000 sigs.k8s.io/controller-runtime v0.16.3 sigs.k8s.io/kind v0.20.0 + sigs.k8s.io/yaml v1.4.0 ) require ( @@ -36,6 +50,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect + github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/adrg/xdg v0.4.0 // indirect github.com/alessio/shellescape v1.4.1 // indirect @@ -44,20 +59,22 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect 
github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v24.0.7+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.7.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -74,19 +91,23 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -94,49 +115,57 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/prometheus/client_golang v1.17.0 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect - github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/sourcegraph/conc 
v0.3.0 // indirect - github.com/spf13/afero v1.10.0 // indirect - github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/cobra v1.8.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.17.0 // indirect + github.com/spf13/viper v1.18.2 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/vmware-tanzu/net-operator-api v0.0.0-20231019160108-42131d6e8360 // indirect github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20231214185006-5477585eebfd // indirect - github.com/vmware-tanzu/vm-operator/external/tanzu-topology v0.0.0-20231214185006-5477585eebfd // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect + go.opentelemetry.io/otel v1.20.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect + go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.opentelemetry.io/otel/sdk v1.20.0 // indirect + go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.25.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.14.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect + google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.28.6 // indirect - k8s.io/apiserver v0.28.6 // indirect k8s.io/cluster-bootstrap v0.28.6 // indirect - k8s.io/component-base v0.28.6 // indirect + k8s.io/kms v0.28.6 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/test/go.sum b/test/go.sum index 792ee5149d..e000c35086 100644 --- a/test/go.sum +++ b/test/go.sum @@ -3,39 +3,27 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= 
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod 
h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -61,6 +49,7 @@ github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBa github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= @@ -83,9 +72,11 @@ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/g github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -97,6 +88,8 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -109,9 +102,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/corefile-migration v1.0.21 h1:W/DCETrHDiFo0Wj03EyMkaQ9fwsmSgqTCQDHpceaSsE= @@ -120,8 +111,12 @@ github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -143,28 +138,28 @@ github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBi github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02 h1:tR3jsKPiO/mb6ntzk/dJlHZtm37CPfVp1C9KIo534+4= +github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod 
h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= +github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -175,12 +170,14 @@ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -195,9 +192,12 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= @@ -224,12 +224,15 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -242,15 +245,12 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -265,6 +265,7 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo= github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -273,9 +274,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -292,18 +291,11 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof 
v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -319,16 +311,21 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap 
v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -366,6 +363,7 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -388,7 +386,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -417,8 +414,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -436,6 +433,7 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= @@ -491,7 +489,6 @@ github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -502,29 +499,29 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -532,8 +529,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= -github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -545,20 +542,22 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= -github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= 
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= @@ -571,8 +570,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= -github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -597,6 +596,7 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= @@ -610,27 +610,54 @@ github.com/vmware-tanzu/vm-operator/external/tanzu-topology v0.0.0-2023121418500 github.com/vmware-tanzu/vm-operator/external/tanzu-topology v0.0.0-20231214185006-5477585eebfd/go.mod h1:dfYrWS8DMRN+XZfhu8M4LVHmeGvYB29Ipd7j4uIq+mU= github.com/vmware/govmomi v0.34.2 h1:o6ydkTVITOkpQU6HAf6tP5GvHFCNJlNUNlMsvFK77X4= github.com/vmware/govmomi v0.34.2/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= +go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= +go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= +go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= +go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= +go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= +go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U= +go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0= +go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= +go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= +go.opentelemetry.io/otel/sdk 
v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= +go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -638,6 +665,7 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -648,9 +676,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= @@ -678,7 +704,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -688,8 +713,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -708,7 +731,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -720,22 +742,12 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net 
v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= @@ -745,20 +757,14 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= -golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -798,26 +804,16 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -825,7 +821,6 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= @@ -854,8 +849,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -893,27 +888,11 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= @@ -934,22 +913,12 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -969,47 +938,25 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1034,6 +981,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1041,6 +989,8 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1068,7 +1018,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.28.6 h1:yy6u9CuIhmg55YvF/BavPBBXB+5QicB64njJXxVnzLo= k8s.io/api v0.28.6/go.mod h1:AM6Ys6g9MY3dl/XNaNfg/GePI0FT7WBGu8efU/lirAo= @@ -1101,6 +1050,8 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= 
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
+k8s.io/kms v0.28.6 h1:WfpL9iSiB012zPUtPGT+OGv4yncdcvwH1ce/UYv4RjQ=
+k8s.io/kms v0.28.6/go.mod h1:ONhtDMHoDgKQ/QzN6WiqJlmnpE9iyMQg1pLock4zug8=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
@@ -1113,6 +1064,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/cluster-api v1.6.1 h1:I34p/fwgRlEhs+o9cUhKXDwNNfPS3no0yJsd2bJyQVc=
sigs.k8s.io/cluster-api v1.6.1/go.mod h1:DaxwruDvSaEYq5q6FREDaGzX6UsAVUCA99Sp8vfMHyQ=
sigs.k8s.io/cluster-api/test v1.6.1 h1:9TffRPOuYNUyfHqdeWQtFhdK0oY+NAbvjlzbqK7chTw=
diff --git a/test/infrastructure/tmp-to-be-deleted/README.md b/test/infrastructure/tmp-to-be-deleted/README.md
new file mode 100644
index 0000000000..18d6b95980
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/README.md
@@ -0,0 +1 @@
+We can drop this package as soon as we bump to a CAPI version including https://github.com/kubernetes-sigs/cluster-api/pull/9986
diff --git a/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/etcdcluster_annotations.go b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/etcdcluster_annotations.go
new file mode 100644
index 0000000000..3b56062ce6
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/etcdcluster_annotations.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 is a temporary copy from CAPI in-memory provider. We can drop this package as soon as we bump to
+// a CAPI version including https://github.com/kubernetes-sigs/cluster-api/pull/9986
+package v1alpha1
+
+// defines annotations to be applied to in memory etcd pods in order to track etcd cluster
+// info belonging to the etcd member each pod represents.
+const (
+	// EtcdClusterIDAnnotationName defines the name of the annotation applied to in memory etcd
+	// pods to track the cluster ID of the etcd member each pod represents.
+	EtcdClusterIDAnnotationName = "etcd.inmemory.infrastructure.cluster.x-k8s.io/cluster-id"
+
+	// EtcdMemberIDAnnotationName defines the name of the annotation applied to in memory etcd
+	// pods to track the member ID of the etcd member each pod represents.
+	EtcdMemberIDAnnotationName = "etcd.inmemory.infrastructure.cluster.x-k8s.io/member-id"
+
+	// EtcdLeaderFromAnnotationName defines the name of the annotation applied to in memory etcd
+	// pods to track the leadership status of the etcd member each pod represents.
+	// Note: We are tracking the time since an etcd member became leader; if more than one pod has this
+	// annotation, the last etcd member that became leader is the current leader.
+	// By using this mechanism, leadership can be forwarded to another pod with an atomic operation
+	// (add/update of the annotation to the pod/etcd member we are forwarding leadership to).
+	EtcdLeaderFromAnnotationName = "etcd.inmemory.infrastructure.cluster.x-k8s.io/leader-from"
+
+	// EtcdMemberRemoved is added to etcd pods which have been removed from the etcd cluster.
+	EtcdMemberRemoved = "etcd.inmemory.infrastructure.cluster.x-k8s.io/member-removed"
+)
diff --git a/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/groupversion_info.go b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/groupversion_info.go
new file mode 100644
index 0000000000..545aea8832
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the inmemory v1alpha1 API group
+// +kubebuilder:object:generate=true
+// +groupName=virtual.cluster.x-k8s.io
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is group version used to register these objects.
+	GroupVersion = schema.GroupVersion{Group: "virtual.cluster.x-k8s.io", Version: "v1alpha1"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/machine_types.go b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/machine_types.go
new file mode 100644
index 0000000000..d1724a7757
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/machine_types.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CloudMachineKind is the kind of CloudMachine. +const CloudMachineKind = "CloudMachine" + +// CloudMachineSpec is the spec of CloudMachine. +type CloudMachineSpec struct { +} + +// CloudMachineStatus is the status of CloudMachine. +type CloudMachineStatus struct { +} + +// +kubebuilder:object:root=true + +// CloudMachine represents a machine in memory. +type CloudMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CloudMachineSpec `json:"spec,omitempty"` + Status CloudMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CloudMachineList contains a list of CloudMachine. +type CloudMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CloudMachine `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CloudMachine{}, &CloudMachineList{}) +} diff --git a/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/zz_generated.deepcopy.go b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1d32ee5cfa --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudMachine) DeepCopyInto(out *CloudMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudMachine. +func (in *CloudMachine) DeepCopy() *CloudMachine { + if in == nil { + return nil + } + out := new(CloudMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudMachineList) DeepCopyInto(out *CloudMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudMachineList. +func (in *CloudMachineList) DeepCopy() *CloudMachineList { + if in == nil { + return nil + } + out := new(CloudMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudMachineSpec) DeepCopyInto(out *CloudMachineSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudMachineSpec. +func (in *CloudMachineSpec) DeepCopy() *CloudMachineSpec { + if in == nil { + return nil + } + out := new(CloudMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudMachineStatus) DeepCopyInto(out *CloudMachineStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudMachineStatus. +func (in *CloudMachineStatus) DeepCopy() *CloudMachineStatus { + if in == nil { + return nil + } + out := new(CloudMachineStatus) + in.DeepCopyInto(out) + return out +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/alias.go b/test/infrastructure/tmp-to-be-deleted/runtime/alias.go new file mode 100644 index 0000000000..b8b376ddee --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/alias.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + + inmemoryclient "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime/client" + inmemorymanager "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime/manager" +) + +// Client knows how to perform CRUD operations on resources in a resource group. +type Client inmemoryclient.Client + +// Object represents an object. +type Object client.Object + +// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. +// A Manager is required to create Controllers. +type Manager inmemorymanager.Manager + +var ( + // NewManager returns a new Manager for creating Controllers. 
+ NewManager = inmemorymanager.New +) diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/cache.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/cache.go new file mode 100644 index 0000000000..78508a878c --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/cache.go @@ -0,0 +1,196 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// Cache implements an in-memory cache. +type Cache interface { + Start(ctx context.Context) error + + AddResourceGroup(name string) + DeleteResourceGroup(name string) + + Get(resourceGroup string, key client.ObjectKey, obj client.Object) error + List(resourceGroup string, list client.ObjectList, opts ...client.ListOption) error + Create(resourceGroup string, obj client.Object) error + Delete(resourceGroup string, obj client.Object) error + Update(resourceGroup string, obj client.Object) error + Patch(resourceGroup string, obj client.Object, patch client.Patch) error + + GetInformer(ctx context.Context, obj client.Object) (Informer, error) + GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) +} + +// Informer forwards events to event handlers. +type Informer interface { + AddEventHandler(handler InformEventHandler) error + RemoveEventHandler(handler InformEventHandler) error +} + +// InformEventHandler handle events originated by a source. +type InformEventHandler interface { + OnCreate(resourceGroup string, obj client.Object) + OnUpdate(resourceGroup string, oldObj, newObj client.Object) + OnDelete(resourceGroup string, obj client.Object) + OnGeneric(resourceGroup string, obj client.Object) +} + +type cache struct { + scheme *runtime.Scheme + + lock sync.RWMutex + resourceGroups map[string]*resourceGroupTracker + informers map[schema.GroupVersionKind]Informer + + garbageCollectorRequeueAfter time.Duration + garbageCollectorRequeueAfterJitterFactor float64 + garbageCollectorConcurrency int + garbageCollectorQueue workqueue.RateLimitingInterface + + syncPeriod time.Duration + syncConcurrency int + syncQueue workqueue.RateLimitingInterface + + started bool +} + +type resourceGroupTracker struct { + lock sync.RWMutex + objects map[schema.GroupVersionKind]map[types.NamespacedName]client.Object + // ownedObjects tracks ownership. Key is the owner, values are the owned objects. 
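+	// For illustration (names are hypothetical): if a CloudMachine "parent" owns a
+	// CloudMachine "child", ownedObjects[{gvk, parent}] contains the entry {gvk, child},
+	// which is what allows a delete of "parent" to cascade to "child".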
+	ownedObjects map[ownReference]map[ownReference]struct{}
+}
+
+type ownReference struct {
+	gvk schema.GroupVersionKind
+	key types.NamespacedName
+}
+
+func newOwnReferenceFromOwnerReference(namespace string, owner metav1.OwnerReference) (*ownReference, error) {
+	gv, err := schema.ParseGroupVersion(owner.APIVersion)
+	if err != nil {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("invalid APIVersion in ownerReferences: %s", owner.APIVersion))
+	}
+	ownerGVK := gv.WithKind(owner.Kind)
+	ownerKey := types.NamespacedName{
+		// TODO: check if there is something to do for namespaced objects owned by global objects
+		Namespace: namespace,
+		Name:      owner.Name,
+	}
+	return &ownReference{gvk: ownerGVK, key: ownerKey}, nil
+}
+
+var _ Cache = &cache{}
+
+// NewCache returns a new Cache.
+func NewCache(scheme *runtime.Scheme) Cache {
+	return &cache{
+		scheme:         scheme,
+		resourceGroups: map[string]*resourceGroupTracker{},
+		informers:      map[schema.GroupVersionKind]Informer{},
+		garbageCollectorRequeueAfter:             30 * time.Second, // TODO: Expose as option
+		garbageCollectorRequeueAfterJitterFactor: 0.3,              // TODO: Expose as option
+		garbageCollectorConcurrency:              1,                // TODO: Expose as option
+		syncPeriod:      10 * time.Minute, // TODO: Expose as option
+		syncConcurrency: 1,                // TODO: Expose as option
+	}
+}
+
+func (c *cache) Start(ctx context.Context) error {
+	if ctx == nil {
+		return fmt.Errorf("context cannot be nil")
+	}
+
+	log := ctrl.LoggerFrom(ctx)
+
+	if c.started {
+		return fmt.Errorf("cache started more than once")
+	}
+
+	log.Info("Starting cache")
+
+	if err := c.startGarbageCollector(ctx); err != nil {
+		return err
+	}
+	if err := c.startSyncer(ctx); err != nil {
+		return err
+	}
+
+	c.started = true
+	log.Info("Cache successfully started!")
+	return nil
+}
+
+func (c *cache) AddResourceGroup(name string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if _, ok := c.resourceGroups[name]; ok {
+		return
+	}
+	c.resourceGroups[name] = &resourceGroupTracker{
+		objects:      map[schema.GroupVersionKind]map[types.NamespacedName]client.Object{},
+		ownedObjects: map[ownReference]map[ownReference]struct{}{},
+	}
+}
+
+func (c *cache) DeleteResourceGroup(name string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	delete(c.resourceGroups, name)
+}
+
+func (c *cache) resourceGroupTracker(resourceGroup string) *resourceGroupTracker {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	return c.resourceGroups[resourceGroup]
+}
+
+func (c *cache) gvkGetAndSet(obj runtime.Object) (schema.GroupVersionKind, error) {
+	gvk, err := apiutil.GVKForObject(obj, c.scheme)
+	if err != nil {
+		return schema.GroupVersionKind{}, apierrors.NewInternalError(err)
+	}
+
+	obj.GetObjectKind().SetGroupVersionKind(gvk)
+	return gvk, nil
+}
+
+// unsafeGuessGroupVersionResource assumes Resource is equal to Kind, which is not the case in normal Kubernetes.
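+// For example, the guessed resource for the CloudMachine kind is "CloudMachine" rather than the
+// pluralized, lower-cased "cloudmachines" a real API server would serve; here the guess is primarily
+// used to build GroupResource values for error messages, where it is good enough.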
+func unsafeGuessGroupVersionResource(gvk schema.GroupVersionKind) schema.GroupVersionResource { + return schema.GroupVersionResource{Group: gvk.Group, Version: gvk.Version, Resource: gvk.Kind} +} + +func unsafeGuessObjectKindFromList(gvk schema.GroupVersionKind) schema.GroupVersionKind { + return schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: strings.TrimSuffix(gvk.Kind, "List")} +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/cache_test.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/cache_test.go new file mode 100644 index 0000000000..9972f43754 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/cache_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "math/rand" + "sync/atomic" + "testing" + "time" + + . "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + + cloudv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/api/v1alpha1" +) + +var scheme = runtime.NewScheme() + +func init() { + _ = cloudv1.AddToScheme(scheme) +} + +func Test_cache_scale(t *testing.T) { + t.Skip() + g := NewWithT(t) + + ctrl.SetLogger(klog.Background()) + + resourceGroups := 1000 + objectsForResourceGroups := 500 + operationFrequencyForResourceGroup := 10 * time.Millisecond + testDuration := 2 * time.Minute + + var createCount uint64 + var getCount uint64 + var listCount uint64 + var deleteCount uint64 + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + c := NewCache(scheme).(*cache) + c.syncPeriod = testDuration / 10 // force a shorter sync period + c.garbageCollectorRequeueAfter = 500 * time.Millisecond // force a shorter gc requeueAfter + err := c.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + + g.Eventually(func() bool { + return c.started + }, 5*time.Second, 200*time.Millisecond).Should(BeTrue(), "manager should start") + + machineName := func(j int) string { + return fmt.Sprintf("machine-%d", j) + } + + for i := 0; i < resourceGroups; i++ { + resourceGroup := fmt.Sprintf("resourceGroup-%d", i) + c.AddResourceGroup(resourceGroup) + + go func() { + for { + select { + case <-time.After(wait.Jitter(operationFrequencyForResourceGroup, 1)): + operation := rand.Intn(3) //nolint:gosec // Intentionally using a weak random number generator here. + item := rand.Intn(objectsForResourceGroups) //nolint:gosec // Intentionally using a weak random number generator here. 
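+				// Randomly pick one of three operations (create-or-get, list, delete)
+				// for the selected machine, so that the cache sees a mixed concurrent
+				// load across all resource groups.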
+				switch operation {
+				case 0: // create or get
+					machine := &cloudv1.CloudMachine{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: machineName(item),
+						},
+					}
+					err := c.Create(resourceGroup, machine)
+					if apierrors.IsAlreadyExists(err) {
+						if err = c.Get(resourceGroup, types.NamespacedName{Name: machineName(item)}, machine); err == nil {
+							atomic.AddUint64(&getCount, 1)
+							continue
+						}
+					}
+					g.Expect(err).ToNot(HaveOccurred())
+					atomic.AddUint64(&createCount, 1)
+				case 1: // list
+					obj := &cloudv1.CloudMachineList{}
+					err := c.List(resourceGroup, obj)
+					g.Expect(err).ToNot(HaveOccurred())
+					atomic.AddUint64(&listCount, 1)
+				case 2: // delete
+					machine := &cloudv1.CloudMachine{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: machineName(item),
+						},
+					}
+					err := c.Delete(resourceGroup, machine)
+					if apierrors.IsNotFound(err) {
+						continue
+					}
+					g.Expect(err).ToNot(HaveOccurred())
+					atomic.AddUint64(&deleteCount, 1)
+				}
+
+			case <-ctx.Done():
+				return
+			}
+		}
+		}()
+	}
+
+	time.Sleep(testDuration)
+
+	t.Log("createCount", createCount, "getCount", getCount, "listCount", listCount, "deleteCount", deleteCount)
+
+	cancel()
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/client.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/client.go
new file mode 100644
index 0000000000..ff01f75230
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/client.go
@@ -0,0 +1,446 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cache + +import ( + "fmt" + "time" + + jsonpatch "github.com/evanphx/json-patch/v5" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (c *cache) Get(resourceGroup string, objKey client.ObjectKey, obj client.Object) error { + if resourceGroup == "" { + return apierrors.NewBadRequest("resourceGroup must not be empty") + } + + if objKey.Name == "" { + return apierrors.NewBadRequest("objKey.Name must not be empty") + } + + if obj == nil { + return apierrors.NewBadRequest("object must not be nil") + } + + objGVK, err := c.gvkGetAndSet(obj) + if err != nil { + return err + } + + tracker := c.resourceGroupTracker(resourceGroup) + if tracker == nil { + return apierrors.NewBadRequest(fmt.Sprintf("resourceGroup %s does not exist", resourceGroup)) + } + + tracker.lock.RLock() + defer tracker.lock.RUnlock() + + objects, ok := tracker.objects[objGVK] + if !ok { + return apierrors.NewNotFound(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String()) + } + + trackedObj, ok := objects[objKey] + if !ok { + return apierrors.NewNotFound(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String()) + } + + if err := c.scheme.Convert(trackedObj, obj, nil); err != nil { + return apierrors.NewInternalError(err) + } + obj.GetObjectKind().SetGroupVersionKind(trackedObj.GetObjectKind().GroupVersionKind()) + + return nil +} + +func (c *cache) List(resourceGroup string, list client.ObjectList, opts ...client.ListOption) error { + if resourceGroup == "" { + return apierrors.NewBadRequest("resourceGroup must not be empty") + } + + if list == nil { + return apierrors.NewBadRequest("list must not be nil") + } + + gvk, err := c.gvkGetAndSet(list) + if err != nil { + return err + } + + tracker := c.resourceGroupTracker(resourceGroup) + if tracker == nil { + return apierrors.NewBadRequest(fmt.Sprintf("resourceGroup %s does not exist", resourceGroup)) + } + + tracker.lock.RLock() + defer tracker.lock.RUnlock() + + items := make([]runtime.Object, 0) + objects, ok := tracker.objects[unsafeGuessObjectKindFromList(gvk)] + if ok { + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + for _, obj := range objects { + if listOpts.Namespace != "" && obj.GetNamespace() != listOpts.Namespace { + continue + } + + if listOpts.LabelSelector != nil && !listOpts.LabelSelector.Empty() { + metaLabels := labels.Set(obj.GetLabels()) + if !listOpts.LabelSelector.Matches(metaLabels) { + continue + } + } + + // TODO(killianmuldoon): This only matches the nodeName field for pods. No other fieldSelectors are implemented. This should return an error if another fieldselector is used. 
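+			// For example (hypothetical caller), a List filtered with
+			//   client.MatchingFields{"spec.nodeName": "node-1"}
+			// is only evaluated for Pods; field selectors on other kinds are
+			// currently ignored rather than rejected.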
+ if pod, ok := obj.(*corev1.Pod); ok { + if listOpts.FieldSelector != nil && !listOpts.FieldSelector.Empty() { + if !listOpts.FieldSelector.Matches(fields.Set{"spec.nodeName": pod.Spec.NodeName}) { + continue + } + } + } + + obj := obj.DeepCopyObject().(client.Object) + switch list.(type) { + case *unstructured.UnstructuredList: + unstructuredObj := &unstructured.Unstructured{} + if err := c.scheme.Convert(obj, unstructuredObj, nil); err != nil { + return apierrors.NewInternalError(err) + } + items = append(items, unstructuredObj) + default: + items = append(items, obj) + } + } + } + + if err := meta.SetList(list, items); err != nil { + return apierrors.NewInternalError(err) + } + return nil +} + +func (c *cache) Create(resourceGroup string, obj client.Object) error { + return c.store(resourceGroup, obj, false) +} + +func (c *cache) Update(resourceGroup string, obj client.Object) error { + return c.store(resourceGroup, obj, true) +} + +func (c *cache) store(resourceGroup string, obj client.Object, replaceExisting bool) error { + if resourceGroup == "" { + return apierrors.NewBadRequest("resourceGroup must not be empty") + } + + if obj == nil { + return apierrors.NewBadRequest("object must not be nil") + } + + objGVK, err := c.gvkGetAndSet(obj) + if err != nil { + return err + } + + if replaceExisting && obj.GetName() == "" { + return apierrors.NewBadRequest("object name must not be empty") + } + + tracker := c.resourceGroupTracker(resourceGroup) + if tracker == nil { + return apierrors.NewBadRequest(fmt.Sprintf("resourceGroup %s does not exist", resourceGroup)) + } + + tracker.lock.Lock() + defer tracker.lock.Unlock() + + // Note: This just validates that all owners exist in tracker. + for _, o := range obj.GetOwnerReferences() { + oRef, err := newOwnReferenceFromOwnerReference(obj.GetNamespace(), o) + if err != nil { + return err + } + objects, ok := tracker.objects[oRef.gvk] + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("ownerReference %s, Name=%s does not exist (GVK not tracked)", oRef.gvk, oRef.key.Name)) + } + if _, ok := objects[oRef.key]; !ok { + return apierrors.NewBadRequest(fmt.Sprintf("ownerReference %s, Name=%s does not exist", oRef.gvk, oRef.key.Name)) + } + } + + _, ok := tracker.objects[objGVK] + if !ok { + tracker.objects[objGVK] = make(map[types.NamespacedName]client.Object) + } + + // TODO: if unstructured, convert to typed object + + objKey := client.ObjectKeyFromObject(obj) + objRef := ownReference{gvk: objGVK, key: objKey} + if trackedObj, ok := tracker.objects[objGVK][objKey]; ok { + if replaceExisting { + if trackedObj.GetResourceVersion() != obj.GetResourceVersion() { + return apierrors.NewConflict(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String(), fmt.Errorf("object has been modified")) + } + + if err := c.beforeUpdate(resourceGroup, trackedObj, obj); err != nil { + return err + } + tracker.objects[objGVK][objKey] = obj.DeepCopyObject().(client.Object) + updateTrackerOwnerReferences(tracker, trackedObj, obj, objRef) + c.afterUpdate(resourceGroup, trackedObj, obj) + return nil + } + return apierrors.NewAlreadyExists(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String()) + } + + if replaceExisting { + return apierrors.NewNotFound(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String()) + } + + if err := c.beforeCreate(resourceGroup, obj); err != nil { + return err + } + tracker.objects[objGVK][objKey] = obj.DeepCopyObject().(client.Object) + updateTrackerOwnerReferences(tracker, nil, obj, objRef) + 
c.afterCreate(resourceGroup, obj) + return nil +} + +func updateTrackerOwnerReferences(tracker *resourceGroupTracker, oldObj, newObj client.Object, objRef ownReference) { + if oldObj != nil { + for _, oldOwnerRef := range oldObj.GetOwnerReferences() { + ownerRefStillExists := false + for _, newOwnerRef := range newObj.GetOwnerReferences() { + if oldOwnerRef == newOwnerRef { + ownerRefStillExists = true + break + } + } + + if !ownerRefStillExists { + // Remove ownerRef from tracker (if necessary) as it has been removed from object. + oldRef, _ := newOwnReferenceFromOwnerReference(newObj.GetNamespace(), oldOwnerRef) + if _, ok := tracker.ownedObjects[*oldRef]; !ok { + continue + } + delete(tracker.ownedObjects[*oldRef], objRef) + if len(tracker.ownedObjects[*oldRef]) == 0 { + delete(tracker.ownedObjects, *oldRef) + } + } + } + } + for _, newOwnerRef := range newObj.GetOwnerReferences() { + ownerRefAlreadyExisted := false + if oldObj != nil { + for _, oldOwnerRef := range oldObj.GetOwnerReferences() { + if newOwnerRef == oldOwnerRef { + ownerRefAlreadyExisted = true + break + } + } + } + + if !ownerRefAlreadyExisted { + // Add new ownerRef to tracker. + newRef, _ := newOwnReferenceFromOwnerReference(newObj.GetNamespace(), newOwnerRef) + if _, ok := tracker.ownedObjects[*newRef]; !ok { + tracker.ownedObjects[*newRef] = map[ownReference]struct{}{} + } + tracker.ownedObjects[*newRef][objRef] = struct{}{} + } + } +} + +func (c *cache) Patch(resourceGroup string, obj client.Object, patch client.Patch) error { + patchData, err := patch.Data(obj) + if err != nil { + return apierrors.NewInternalError(err) + } + + encoder, err := c.getEncoder(obj, obj.GetObjectKind().GroupVersionKind().GroupVersion()) + if err != nil { + return apierrors.NewInternalError(err) + } + + originalObjJS, err := runtime.Encode(encoder, obj) + if err != nil { + return apierrors.NewInternalError(err) + } + + var changedJS []byte + switch patch.Type() { + case types.MergePatchType: + changedJS, err = jsonpatch.MergePatch(originalObjJS, patchData) + if err != nil { + return apierrors.NewInternalError(err) + } + case types.StrategicMergePatchType: + // NOTE: we are treating StrategicMergePatch as MergePatch; it is an acceptable proxy for this use case. 
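+		// The practical difference is list handling: an RFC 7386 merge patch replaces
+		// arrays wholesale, while a real strategic merge patch can merge list entries by
+		// patchMergeKey; callers of this in-memory cache are not expected to rely on that.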
+		changedJS, err = jsonpatch.MergePatch(originalObjJS, patchData)
+		if err != nil {
+			return apierrors.NewInternalError(err)
+		}
+	default:
+		return apierrors.NewBadRequest(fmt.Sprintf("patch of type %s is not supported", patch.Type()))
+	}
+
+	codecFactory := serializer.NewCodecFactory(c.scheme)
+	err = runtime.DecodeInto(codecFactory.UniversalDecoder(), changedJS, obj)
+	if err != nil {
+		return apierrors.NewInternalError(err)
+	}
+
+	return c.store(resourceGroup, obj, true)
+}
+
+func (c *cache) getEncoder(obj runtime.Object, gv runtime.GroupVersioner) (runtime.Encoder, error) {
+	codecs := serializer.NewCodecFactory(c.scheme)
+
+	info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
+	if !ok {
+		return nil, fmt.Errorf("failed to create serializer for %T", obj)
+	}
+
+	encoder := codecs.EncoderForVersion(info.Serializer, gv)
+	return encoder, nil
+}
+
+func (c *cache) Delete(resourceGroup string, obj client.Object) error {
+	if resourceGroup == "" {
+		return apierrors.NewBadRequest("resourceGroup must not be empty")
+	}
+
+	if obj == nil {
+		return apierrors.NewBadRequest("object must not be nil")
+	}
+
+	if obj.GetName() == "" {
+		return apierrors.NewBadRequest("object name must not be empty")
+	}
+
+	objGVK, err := c.gvkGetAndSet(obj)
+	if err != nil {
+		return err
+	}
+
+	obj = obj.DeepCopyObject().(client.Object)
+
+	objKey := client.ObjectKeyFromObject(obj)
+	deleted, err := c.tryDelete(resourceGroup, objGVK, objKey)
+	if err != nil {
+		return err
+	}
+	if !deleted {
+		c.garbageCollectorQueue.Add(gcRequest{
+			resourceGroup: resourceGroup,
+			gvk:           objGVK,
+			key:           objKey,
+		})
+	}
+	return nil
+}
+
+func (c *cache) tryDelete(resourceGroup string, gvk schema.GroupVersionKind, key types.NamespacedName) (bool, error) {
+	tracker := c.resourceGroupTracker(resourceGroup)
+	if tracker == nil {
+		return true, apierrors.NewBadRequest(fmt.Sprintf("resourceGroup %s does not exist", resourceGroup))
+	}
+
+	tracker.lock.Lock()
+	defer tracker.lock.Unlock()
+
+	return c.doTryDeleteLocked(resourceGroup, tracker, gvk, key)
+}
+
+// doTryDeleteLocked tries to delete an object.
+// Note: The tracker must already be locked when calling this method.
+func (c *cache) doTryDeleteLocked(resourceGroup string, tracker *resourceGroupTracker, objGVK schema.GroupVersionKind, objKey types.NamespacedName) (bool, error) {
+	objects, ok := tracker.objects[objGVK]
+	if !ok {
+		return true, apierrors.NewNotFound(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String())
+	}
+
+	obj, ok := tracker.objects[objGVK][objKey]
+	if !ok {
+		return true, apierrors.NewNotFound(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String())
+	}
+
+	// Loop through objects that are owned by obj and try to delete them.
+	// TODO: Consider only deleting the hierarchy if the obj doesn't have any finalizers.
+	if ownedReferences, ok := tracker.ownedObjects[ownReference{gvk: objGVK, key: objKey}]; ok {
+		for ref := range ownedReferences {
+			deleted, err := c.doTryDeleteLocked(resourceGroup, tracker, ref.gvk, ref.key)
+			if err != nil {
+				return false, err
+			}
+			if !deleted {
+				c.garbageCollectorQueue.Add(gcRequest{
+					resourceGroup: resourceGroup,
+					gvk:           ref.gvk,
+					key:           ref.key,
+				})
+			}
+		}
+		delete(tracker.ownedObjects, ownReference{gvk: objGVK, key: objKey})
+	}
+
+	// Set the deletion timestamp if not already set.
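+	// This mirrors API server semantics: an object with finalizers is first marked
+	// with a deletionTimestamp, and is removed from the tracker only once its
+	// finalizers list is empty (see the checks below).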
+	if obj.GetDeletionTimestamp().IsZero() {
+		if err := c.beforeDelete(resourceGroup, obj); err != nil {
+			return false, apierrors.NewBadRequest(err.Error())
+		}
+
+		oldObj := obj.DeepCopyObject().(client.Object)
+		now := metav1.Time{Time: time.Now().UTC()}
+		obj.SetDeletionTimestamp(&now)
+		if err := c.beforeUpdate(resourceGroup, oldObj, obj); err != nil {
+			return false, apierrors.NewBadRequest(err.Error())
+		}
+
+		objects[objKey] = obj
+		c.afterUpdate(resourceGroup, oldObj, obj)
+	}
+
+	// If the object still has finalizers, return early.
+	if len(obj.GetFinalizers()) > 0 {
+		return false, nil
+	}
+
+	// Object doesn't have finalizers, delete it.
+	// Note: we don't call informDelete here because the object is already gone from
+	// the tracker, so it could not be reconciled anyway.
+	delete(objects, objKey)
+	c.afterDelete(resourceGroup, obj)
+	return true, nil
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/client_test.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/client_test.go
new file mode 100644
index 0000000000..bf64cfbfac
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/client_test.go
@@ -0,0 +1,808 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	.
"github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + + cloudv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/api/v1alpha1" +) + +func Test_cache_client(t *testing.T) { + t.Run("create objects", func(t *testing.T) { + g := NewWithT(t) + + c := NewCache(scheme).(*cache) + h := &fakeHandler{} + iMachine, err := c.GetInformer(context.TODO(), &cloudv1.CloudMachine{}) + g.Expect(err).ToNot(HaveOccurred()) + err = iMachine.AddEventHandler(h) + g.Expect(err).ToNot(HaveOccurred()) + + c.AddResourceGroup("foo") + + t.Run("fails if resourceGroup is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + } + err := c.Create("", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if obj is nil", func(t *testing.T) { + g := NewWithT(t) + + err := c.Create("foo", nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if unknown kind", func(t *testing.T) { + // TODO implement test case + }) + + t.Run("fails if resourceGroup does not exist", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + } + err := c.Create("bar", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("create", func(t *testing.T) { + g := NewWithT(t) + + obj := createMachine(t, c, "foo", "bar") + + // Check all the computed fields have been updated on the object. + g.Expect(obj.CreationTimestamp.IsZero()).To(BeFalse()) + g.Expect(obj.ResourceVersion).ToNot(BeEmpty()) + g.Expect(obj.Annotations).To(HaveKey(lastSyncTimeAnnotation)) + + // Check internal state of the tracker is as expected. 
+			c.lock.RLock()
+			defer c.lock.RUnlock()
+
+			g.Expect(c.resourceGroups["foo"].objects).To(HaveKey(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must exist in object tracker for foo")
+			key := types.NamespacedName{Name: "bar"}
+			g.Expect(c.resourceGroups["foo"].objects[cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)]).To(HaveKey(key), "Object bar must exist in object tracker for foo")
+
+			r := c.resourceGroups["foo"].objects[cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)][key]
+			g.Expect(r.GetObjectKind().GroupVersionKind()).To(BeComparableTo(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must be set")
+			g.Expect(r.GetName()).To(Equal("bar"), "name must be equal to object tracker key")
+			g.Expect(r.GetResourceVersion()).To(Equal("v1"), "resourceVersion must be set")
+			g.Expect(r.GetCreationTimestamp()).ToNot(BeZero(), "creation timestamp must be set")
+			g.Expect(r.GetAnnotations()).To(HaveKey(lastSyncTimeAnnotation), "last sync annotation must exist")
+
+			g.Expect(h.Events()).To(ContainElement("foo, CloudMachine=bar, Created"))
+		})
+
+		t.Run("fails if Object already exists", func(t *testing.T) {
+			g := NewWithT(t)
+
+			createMachine(t, c, "foo", "bazzz")
+
+			obj := &cloudv1.CloudMachine{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "bazzz",
+				},
+			}
+			err := c.Create("foo", obj)
+			g.Expect(err).To(HaveOccurred())
+			g.Expect(apierrors.IsAlreadyExists(err)).To(BeTrue())
+		})
+
+		t.Run("Create with owner references", func(t *testing.T) {
+			t.Run("fails for invalid owner reference", func(t *testing.T) {
+				g := NewWithT(t)
+
+				obj := &cloudv1.CloudMachine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "child",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								APIVersion: "something/not/valid",
+								Kind:       "ParentKind",
+								Name:       "parent",
+							},
+						},
+					},
+				}
+				err := c.Create("foo", obj)
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(apierrors.IsBadRequest(err)).To(BeTrue())
+			})
+			t.Run("fails if referenced object does not exist", func(t *testing.T) {
+				g := NewWithT(t)
+
+				obj := &cloudv1.CloudMachine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "child",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								APIVersion: cloudv1.GroupVersion.String(),
+								Kind:       "CloudMachine",
+								Name:       "parentx",
+							},
+						},
+					},
+				}
+				err := c.Create("foo", obj)
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(apierrors.IsBadRequest(err)).To(BeTrue())
+			})
+			t.Run("create updates ownedObjects", func(t *testing.T) {
+				g := NewWithT(t)
+
+				createMachine(t, c, "foo", "parent")
+				obj := &cloudv1.CloudMachine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "child",
+						OwnerReferences: []metav1.OwnerReference{
+							{
+								APIVersion: cloudv1.GroupVersion.String(),
+								Kind:       "CloudMachine",
+								Name:       "parent",
+							},
+						},
+					},
+				}
+				err := c.Create("foo", obj)
+				g.Expect(err).ToNot(HaveOccurred())
+
+				// Check internal state of the tracker is as expected.
+ c.lock.RLock() + defer c.lock.RUnlock() + + parentRef := ownReference{gvk: cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind), key: types.NamespacedName{Namespace: "", Name: "parent"}} + g.Expect(c.resourceGroups["foo"].ownedObjects).To(HaveKey(parentRef), "there should be ownedObjects for parent") + childRef := ownReference{gvk: cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind), key: types.NamespacedName{Namespace: "", Name: "child"}} + g.Expect(c.resourceGroups["foo"].ownedObjects[parentRef]).To(HaveKey(childRef), "parent should own child") + }) + }) + }) + + t.Run("Get objects", func(t *testing.T) { + c := NewCache(scheme).(*cache) + c.AddResourceGroup("foo") + + t.Run("fails if resourceGroup is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Get("", types.NamespacedName{Name: "foo"}, obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if name is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Get("foo", types.NamespacedName{}, obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if Object is nil", func(t *testing.T) { + g := NewWithT(t) + + err := c.Get("foo", types.NamespacedName{Name: "foo"}, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if unknown kind", func(t *testing.T) { + // TODO implement test case + }) + + t.Run("fails if resourceGroup doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Get("bar", types.NamespacedName{Name: "bar"}, obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if gvk doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Get("foo", types.NamespacedName{Name: "bar"}, obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + t.Run("fails if Object doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + createMachine(t, c, "foo", "barz") + + obj := &cloudv1.CloudMachine{} + err := c.Get("foo", types.NamespacedName{Name: "bar"}, obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + t.Run("get", func(t *testing.T) { + g := NewWithT(t) + + createMachine(t, c, "foo", "bar") + + obj := &cloudv1.CloudMachine{} + err := c.Get("foo", types.NamespacedName{Name: "bar"}, obj) + g.Expect(err).ToNot(HaveOccurred()) + + // Check all the computed fields are as expected. 
+ g.Expect(obj.GetObjectKind().GroupVersionKind()).To(BeComparableTo(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must be set") + g.Expect(obj.GetName()).To(Equal("bar"), "name must be equal to object tracker key") + g.Expect(obj.GetResourceVersion()).To(Equal("v1"), "resourceVersion must be set") + g.Expect(obj.GetCreationTimestamp()).ToNot(BeZero(), "creation timestamp must be set") + g.Expect(obj.GetAnnotations()).To(HaveKey(lastSyncTimeAnnotation), "last sync annotation must be set") + }) + }) + + t.Run("list objects", func(t *testing.T) { + c := NewCache(scheme).(*cache) + c.AddResourceGroup("foo") + + t.Run("fails if resourceGroup is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachineList{} + err := c.List("", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if Object is nil", func(t *testing.T) { + g := NewWithT(t) + + err := c.List("foo", nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if unknown kind", func(t *testing.T) { + // TODO implement test case + }) + + t.Run("fails if resourceGroup doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachineList{} + err := c.List("bar", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("list", func(t *testing.T) { + g := NewWithT(t) + + createMachine(t, c, "foo", "bar") + createMachine(t, c, "foo", "baz") + + obj := &cloudv1.CloudMachineList{} + err := c.List("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(obj.Items).To(HaveLen(2)) + + i1 := obj.Items[0] + g.Expect(i1.GetObjectKind().GroupVersionKind()).To(Equal(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must be set") + g.Expect(i1.GetAnnotations()).To(HaveKey(lastSyncTimeAnnotation), "last sync annotation must be present") + + i2 := obj.Items[1] + g.Expect(i2.GetObjectKind().GroupVersionKind()).To(Equal(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must be set") + g.Expect(i2.GetAnnotations()).To(HaveKey(lastSyncTimeAnnotation), "last sync annotation must be present") + }) + + // TODO: test filtering by labels + }) + + t.Run("update objects", func(t *testing.T) { + g := NewWithT(t) + + c := NewCache(scheme).(*cache) + h := &fakeHandler{} + i, err := c.GetInformer(context.TODO(), &cloudv1.CloudMachine{}) + g.Expect(err).ToNot(HaveOccurred()) + err = i.AddEventHandler(h) + g.Expect(err).ToNot(HaveOccurred()) + + c.AddResourceGroup("foo") + + t.Run("fails if resourceGroup is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Update("", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if Object is nil", func(t *testing.T) { + g := NewWithT(t) + + err := c.Update("foo", nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if unknown kind", func(t *testing.T) { + // TODO implement test case + }) + + t.Run("fails if name is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Update("foo", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if resourceGroup doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + } + err := 
c.Update("bar", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if Object doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + } + err := c.Update("foo", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + t.Run("update - no changes", func(t *testing.T) { + g := NewWithT(t) + + objBefore := createMachine(t, c, "foo", "bar") + + objUpdate := objBefore.DeepCopy() + err = c.Update("foo", objUpdate) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objBefore).To(BeComparableTo(objUpdate), "obj before and after must be the same") + + g.Expect(h.Events()).ToNot(ContainElement("foo, CloudMachine=bar, Updated")) + }) + + t.Run("update - with changes", func(t *testing.T) { + g := NewWithT(t) + + objBefore := createMachine(t, c, "foo", "baz") + + time.Sleep(1 * time.Second) + + objUpdate := objBefore.DeepCopy() + objUpdate.Labels = map[string]string{"foo": "bar"} + err = c.Update("foo", objUpdate) + g.Expect(err).ToNot(HaveOccurred()) + + // Check all the computed fields are as expected. + g.Expect(objBefore.GetAnnotations()[lastSyncTimeAnnotation]).ToNot(Equal(objUpdate.GetAnnotations()[lastSyncTimeAnnotation]), "last sync version must be changed") + objBefore.Annotations = objUpdate.Annotations + g.Expect(objBefore.GetResourceVersion()).ToNot(Equal(objUpdate.GetResourceVersion()), "Object version must be changed") + objBefore.SetResourceVersion(objUpdate.GetResourceVersion()) + objBefore.Labels = objUpdate.Labels + g.Expect(objBefore).To(BeComparableTo(objUpdate), "everything else must be the same") + + g.Expect(h.Events()).To(ContainElement("foo, CloudMachine=baz, Updated")) + }) + + t.Run("update - with conflict", func(t *testing.T) { + g := NewWithT(t) + + objBefore := createMachine(t, c, "foo", "bazz") + + objUpdate1 := objBefore.DeepCopy() + objUpdate1.Labels = map[string]string{"foo": "bar"} + + time.Sleep(1 * time.Second) + + err = c.Update("foo", objUpdate1) + g.Expect(err).ToNot(HaveOccurred()) + + objUpdate2 := objBefore.DeepCopy() + err = c.Update("foo", objUpdate2) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsConflict(err)).To(BeTrue()) + + // TODO: check if it has been informed only once + }) + + t.Run("Update with owner references", func(t *testing.T) { + t.Run("fails for invalid owner reference", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "child", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "something/not/valid", + Kind: "ParentKind", + Name: "parent", + }, + }, + }, + } + err := c.Update("foo", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + t.Run("fails if referenced object does not exists", func(t *testing.T) { + g := NewWithT(t) + + objBefore := createMachine(t, c, "foo", "child1") + + objUpdate := objBefore.DeepCopy() + objUpdate.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: cloudv1.GroupVersion.String(), + Kind: "CloudMachine", + Name: "parentx", + }, + } + err := c.Update("foo", objUpdate) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + t.Run("updates takes care of ownedObjects", func(t *testing.T) { + g := NewWithT(t) + + createMachine(t, c, "foo", "parent1") + createMachine(t, c, "foo", "parent2") + + objBefore := &cloudv1.CloudMachine{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "child2", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: cloudv1.GroupVersion.String(), + Kind: "CloudMachine", + Name: "parent1", + }, + }, + }, + } + err := c.Create("foo", objBefore) + g.Expect(err).ToNot(HaveOccurred()) + + objUpdate := objBefore.DeepCopy() + objUpdate.OwnerReferences[0].Name = "parent2" + + err = c.Update("foo", objUpdate) + g.Expect(err).ToNot(HaveOccurred()) + + // Check internal state of the tracker + c.lock.RLock() + defer c.lock.RUnlock() + + parent1Ref := ownReference{gvk: cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind), key: types.NamespacedName{Namespace: "", Name: "parent1"}} + g.Expect(c.resourceGroups["foo"].ownedObjects).ToNot(HaveKey(parent1Ref), "there should not be ownedObjects for parent1") + parent2Ref := ownReference{gvk: cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind), key: types.NamespacedName{Namespace: "", Name: "parent2"}} + g.Expect(c.resourceGroups["foo"].ownedObjects).To(HaveKey(parent2Ref), "there should be ownedObjects for parent2") + childRef := ownReference{gvk: cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind), key: types.NamespacedName{Namespace: "", Name: "child2"}} + g.Expect(c.resourceGroups["foo"].ownedObjects[parent2Ref]).To(HaveKey(childRef), "parent2 should own child") + }) + }) + + // TODO: test system managed fields cannot be updated (see before update) + + // TODO: update list + }) + + // TODO: test patch + + t.Run("delete objects", func(t *testing.T) { + g := NewWithT(t) + + c := NewCache(scheme).(*cache) + h := &fakeHandler{} + i, err := c.GetInformer(context.TODO(), &cloudv1.CloudMachine{}) + g.Expect(err).ToNot(HaveOccurred()) + err = i.AddEventHandler(h) + g.Expect(err).ToNot(HaveOccurred()) + + c.AddResourceGroup("foo") + + t.Run("fails if resourceGroup is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Delete("", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if Object is nil", func(t *testing.T) { + g := NewWithT(t) + + err := c.Delete("foo", nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if name is empty", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{} + err := c.Delete("foo", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + t.Run("fails if unknown kind", func(t *testing.T) { + // TODO implement test case + }) + + t.Run("fails if gvk doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + } + err := c.Delete("foo", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + t.Run("fails if object doesn't exist", func(t *testing.T) { + g := NewWithT(t) + + createMachine(t, c, "foo", "barz") + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + } + err := c.Delete("foo", obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + t.Run("delete", func(t *testing.T) { + g := NewWithT(t) + + obj := createMachine(t, c, "foo", "bar") + + err := c.Delete("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + c.lock.RLock() + defer c.lock.RUnlock() + + g.Expect(c.resourceGroups["foo"].objects).To(HaveKey(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must exist in object tracker for foo") + 
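+ // Note: Delete removes the object entry only; the per-gvk bucket created on first Create intentionally stays registered in the tracker, as the surrounding assertions verify.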
g.Expect(c.resourceGroups["foo"].objects[cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)]).ToNot(HaveKey(types.NamespacedName{Name: "bar"}), "Object bar must not exist in object tracker for foo") + + g.Expect(h.Events()).To(ContainElement("foo, CloudMachine=bar, Deleted")) + }) + + t.Run("delete with finalizers", func(t *testing.T) { + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + c.garbageCollectorQueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + go func() { + <-ctx.Done() + c.garbageCollectorQueue.ShutDown() + }() + + objBefore := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Finalizers: []string{"foo"}, + }, + } + err := c.Create("foo", objBefore) + g.Expect(err).ToNot(HaveOccurred()) + + time.Sleep(1 * time.Second) + + err = c.Delete("foo", objBefore) + g.Expect(err).ToNot(HaveOccurred()) + + objAfterUpdate := &cloudv1.CloudMachine{} + err = c.Get("foo", types.NamespacedName{Name: "baz"}, objAfterUpdate) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(objBefore.GetDeletionTimestamp().IsZero()).To(BeTrue(), "deletion timestamp before delete must not be set") + g.Expect(objAfterUpdate.GetDeletionTimestamp().IsZero()).To(BeFalse(), "deletion timestamp after delete must be set") + objBefore.DeletionTimestamp = objAfterUpdate.DeletionTimestamp + g.Expect(objBefore.GetAnnotations()[lastSyncTimeAnnotation]).ToNot(Equal(objAfterUpdate.GetAnnotations()[lastSyncTimeAnnotation]), "last sync version must be changed") + objBefore.Annotations = objAfterUpdate.Annotations + g.Expect(objBefore.GetResourceVersion()).ToNot(Equal(objAfterUpdate.GetResourceVersion()), "Object version must be changed") + objBefore.SetResourceVersion(objAfterUpdate.GetResourceVersion()) + objBefore.Labels = objAfterUpdate.Labels + g.Expect(objBefore).To(BeComparableTo(objAfterUpdate), "everything else must be the same") + + g.Expect(h.Events()).To(ContainElement("foo, CloudMachine=baz, Deleted")) + + cancel() + }) + + t.Run("delete with owner reference", func(t *testing.T) { + g := NewWithT(t) + + createMachine(t, c, "foo", "parent3") + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "child3", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: cloudv1.GroupVersion.String(), + Kind: "CloudMachine", + Name: "parent3", + }, + }, + }, + } + err := c.Create("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + obj = &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "grandchild3", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: cloudv1.GroupVersion.String(), + Kind: "CloudMachine", + Name: "child3", + }, + }, + }, + } + err = c.Create("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + obj = &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "parent3", + }, + } + err = c.Delete("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + c.lock.RLock() + defer c.lock.RUnlock() + + g.Expect(c.resourceGroups["foo"].objects).To(HaveKey(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must exist in object tracker for foo") + g.Expect(c.resourceGroups["foo"].objects[cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)]).ToNot(HaveKey(types.NamespacedName{Name: "parent3"}), "Object parent3 must not exist in object tracker for foo") + g.Expect(c.resourceGroups["foo"].objects[cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)]).ToNot(HaveKey(types.NamespacedName{Name: "child3"}), "Object child3 must not exist in object tracker for 
foo") + g.Expect(c.resourceGroups["foo"].objects[cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)]).ToNot(HaveKey(types.NamespacedName{Name: "grandchild3"}), "Object grandchild3 must not exist in object tracker for foo") + }) + + // TODO: test finalizers and ownner references together + }) +} + +func createMachine(t *testing.T, c *cache, resourceGroup, name string) *cloudv1.CloudMachine { + t.Helper() + + g := NewWithT(t) + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + err := c.Create(resourceGroup, obj) + g.Expect(err).ToNot(HaveOccurred()) + + return obj +} + +var _ Informer = &fakeInformer{} + +type fakeInformer struct { + handler InformEventHandler +} + +func (i *fakeInformer) AddEventHandler(handler InformEventHandler) error { + i.handler = handler + return nil +} + +func (i *fakeInformer) RemoveEventHandler(_ InformEventHandler) error { + i.handler = nil + return nil +} + +func (i *fakeInformer) InformCreate(resourceGroup string, obj client.Object) { + i.handler.OnCreate(resourceGroup, obj) +} + +func (i *fakeInformer) InformUpdate(resourceGroup string, oldObj, newObj client.Object) { + i.handler.OnUpdate(resourceGroup, oldObj, newObj) +} + +func (i *fakeInformer) InformDelete(resourceGroup string, res client.Object) { + i.handler.OnDelete(resourceGroup, res) +} + +func (i *fakeInformer) InformGeneric(resourceGroup string, res client.Object) { + i.handler.OnGeneric(resourceGroup, res) +} + +var _ InformEventHandler = &fakeHandler{} + +type fakeHandler struct { + events []string +} + +func (h *fakeHandler) Events() []string { + return h.events +} + +func (h *fakeHandler) OnCreate(resourceGroup string, obj client.Object) { + h.events = append(h.events, fmt.Sprintf("%s, %s=%s, Created", resourceGroup, obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName())) +} + +func (h *fakeHandler) OnUpdate(resourceGroup string, _, newObj client.Object) { + h.events = append(h.events, fmt.Sprintf("%s, %s=%s, Updated", resourceGroup, newObj.GetObjectKind().GroupVersionKind().Kind, newObj.GetName())) +} + +func (h *fakeHandler) OnDelete(resourceGroup string, obj client.Object) { + h.events = append(h.events, fmt.Sprintf("%s, %s=%s, Deleted", resourceGroup, obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName())) +} + +func (h *fakeHandler) OnGeneric(resourceGroup string, obj client.Object) { + h.events = append(h.events, fmt.Sprintf("%s, %s=%s, Synced", resourceGroup, obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName())) +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/doc.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/doc.go new file mode 100644 index 0000000000..7b4b73aa16 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package cache defines resource group aware Cache. + +The Cache implements sync loop and garbage collector inspired from the ones existing +in Kubernetes. 
+ +Note: The cloud runtime uses a single Cache shared across all the resource groups. +*/ +package cache diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/gc.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/gc.go new file mode 100644 index 0000000000..85c134a117 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/gc.go @@ -0,0 +1,114 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" +) + +type gcRequest struct { + resourceGroup string + gvk schema.GroupVersionKind + key types.NamespacedName +} + +func (c *cache) startGarbageCollector(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx).WithValues("controller", "gc") // TODO: consider whether to use something other than controller + ctx = ctrl.LoggerInto(ctx, log) + + log.Info("Starting garbage collector queue") + c.garbageCollectorQueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + go func() { + <-ctx.Done() + c.garbageCollectorQueue.ShutDown() + }() + + var workers int64 + go func() { + log.Info("Starting garbage collector workers", "count", c.garbageCollectorConcurrency) + wg := &sync.WaitGroup{} + wg.Add(c.garbageCollectorConcurrency) + for i := 0; i < c.garbageCollectorConcurrency; i++ { + go func() { + atomic.AddInt64(&workers, 1) + defer wg.Done() + for c.processGarbageCollectorWorkItem(ctx) { + } + }() + } + <-ctx.Done() + wg.Wait() + }() + + if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { + if atomic.LoadInt64(&workers) < int64(c.garbageCollectorConcurrency) { + return false, nil + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to start garbage collector workers: %v", err) + } + return nil +} + +func (c *cache) processGarbageCollectorWorkItem(ctx context.Context) bool { + log := ctrl.LoggerFrom(ctx) + + item, shutdown := c.garbageCollectorQueue.Get() + if shutdown { + return false + } + + // Always mark the item as done once processing completes, whatever the outcome. + defer c.garbageCollectorQueue.Done(item) + + gcr, ok := item.(gcRequest) + if !ok { + c.garbageCollectorQueue.Forget(item) + return true + } + + deleted, err := c.tryDelete(gcr.resourceGroup, gcr.gvk, gcr.key) + if err != nil { + log.Error(err, "Error garbage collecting object", "resourceGroup", gcr.resourceGroup, gcr.gvk.Kind, gcr.key) + } + + if err == nil && deleted { + c.garbageCollectorQueue.Forget(item) + log.Info("Object garbage collected", "resourceGroup", gcr.resourceGroup, gcr.gvk.Kind, gcr.key) + return true + } + + c.garbageCollectorQueue.Forget(item) + + requeueAfter := wait.Jitter(c.garbageCollectorRequeueAfter, 
c.garbageCollectorRequeueAfterJitterFactor) + c.garbageCollectorQueue.AddAfter(item, requeueAfter) + return true +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/gc_test.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/gc_test.go new file mode 100644 index 0000000000..7cba19827f --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/gc_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "testing" + "time" + + . "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + cloudv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/api/v1alpha1" +) + +func Test_cache_gc(t *testing.T) { + g := NewWithT(t) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + c := NewCache(scheme).(*cache) + c.garbageCollectorRequeueAfter = 500 * time.Millisecond // force a shorter gc requeueAfter + err := c.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + + g.Eventually(func() bool { + return c.started + }, 5*time.Second, 200*time.Millisecond).Should(BeTrue(), "manager should start") + + c.AddResourceGroup("foo") + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Finalizers: []string{"foo"}, + }, + } + err = c.Create("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + err = c.Delete("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + g.Consistently(func() bool { + if err := c.Get("foo", types.NamespacedName{Name: "baz"}, obj); apierrors.IsNotFound(err) { + return true + } + return false + }, 5*time.Second, 200*time.Millisecond).Should(BeFalse(), "object with finalizer should never be deleted") + + obj.Finalizers = nil + err = c.Update("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + g.Eventually(func() bool { + if err := c.Get("foo", types.NamespacedName{Name: "baz"}, obj); apierrors.IsNotFound(err) { + return true + } + return false + }, 5*time.Second, 200*time.Millisecond).Should(BeTrue(), "object should be garbage collected") + + c.lock.RLock() + defer c.lock.RUnlock() + + g.Expect(c.resourceGroups["foo"].objects).To(HaveKey(cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)), "gvk must exist in object tracker for foo") + g.Expect(c.resourceGroups["foo"].objects[cloudv1.GroupVersion.WithKind(cloudv1.CloudMachineKind)]).ToNot(HaveKey(types.NamespacedName{Name: "baz"}), "object baz must not exist in object tracker for foo") + + cancel() +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/hooks.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/hooks.go new file mode 100644 index 0000000000..44b65e1d1c --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/hooks.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (c *cache) beforeCreate(_ string, obj client.Object) error { + now := time.Now().UTC() + obj.SetCreationTimestamp(metav1.Time{Time: now}) + // TODO: UID + obj.SetAnnotations(appendAnnotations(obj, lastSyncTimeAnnotation, now.Format(time.RFC3339))) + obj.SetResourceVersion(fmt.Sprintf("v%d", 1)) + return nil +} + +func (c *cache) afterCreate(resourceGroup string, obj client.Object) { + c.informCreate(resourceGroup, obj) +} + +func (c *cache) beforeUpdate(_ string, oldObj, newObj client.Object) error { + newObj.SetCreationTimestamp(oldObj.GetCreationTimestamp()) + newObj.SetResourceVersion(oldObj.GetResourceVersion()) + // TODO: UID + newObj.SetAnnotations(appendAnnotations(newObj, lastSyncTimeAnnotation, oldObj.GetAnnotations()[lastSyncTimeAnnotation])) + if !oldObj.GetDeletionTimestamp().IsZero() { + newObj.SetDeletionTimestamp(oldObj.GetDeletionTimestamp()) + } + if !reflect.DeepEqual(newObj, oldObj) { + now := time.Now().UTC() + newObj.SetAnnotations(appendAnnotations(newObj, lastSyncTimeAnnotation, now.Format(time.RFC3339))) + + oldResourceVersion, _ := strconv.Atoi(strings.TrimPrefix(oldObj.GetResourceVersion(), "v")) + newObj.SetResourceVersion(fmt.Sprintf("v%d", oldResourceVersion+1)) + } + return nil +} + +func (c *cache) afterUpdate(resourceGroup string, oldObj, newObj client.Object) { + if oldObj.GetDeletionTimestamp().IsZero() && !newObj.GetDeletionTimestamp().IsZero() { + c.informDelete(resourceGroup, newObj) + return + } + if !reflect.DeepEqual(newObj, oldObj) { + c.informUpdate(resourceGroup, oldObj, newObj) + } +} + +func (c *cache) beforeDelete(_ string, _ client.Object) error { + return nil +} + +func (c *cache) afterDelete(_ string, _ client.Object) { +} + +func appendAnnotations(obj client.Object, keyValuePairs ...string) map[string]string { + newAnnotations := map[string]string{} + for k, v := range obj.GetAnnotations() { + newAnnotations[k] = v + } + for i := 0; i < len(keyValuePairs)-1; i += 2 { + k := keyValuePairs[i] + v := keyValuePairs[i+1] + newAnnotations[k] = v + } + return newAnnotations +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/informer.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/informer.go new file mode 100644 index 0000000000..18fe4ddda9 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/informer.go @@ -0,0 +1,128 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +type informer struct { + handlers []InformEventHandler + lock sync.RWMutex +} + +func (i *informer) AddEventHandler(handler InformEventHandler) error { + i.lock.Lock() + defer i.lock.Unlock() + + i.handlers = append(i.handlers, handler) + return nil +} + +func (i *informer) RemoveEventHandler(handler InformEventHandler) error { + i.lock.Lock() + defer i.lock.Unlock() + for j, h := range i.handlers { + if h == handler { + i.handlers = append(i.handlers[:j], i.handlers[j+1:]...) + } + } + return nil +} + +func (c *cache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + return c.GetInformerForKind(ctx, gvk) +} + +func (c *cache) GetInformerForKind(_ context.Context, gvk schema.GroupVersionKind) (Informer, error) { + c.lock.Lock() + defer c.lock.Unlock() + + if _, ok := c.informers[gvk]; !ok { + c.informers[gvk] = &informer{} + } + return c.informers[gvk], nil +} + +func (c *cache) informCreate(resourceGroup string, obj client.Object) { + c.lock.RLock() + defer c.lock.RUnlock() + + if i, ok := c.informers[obj.GetObjectKind().GroupVersionKind()]; ok { + i := i.(*informer) + i.lock.RLock() + defer i.lock.RUnlock() + + for _, h := range i.handlers { + h.OnCreate(resourceGroup, obj) + } + } +} + +func (c *cache) informUpdate(resourceGroup string, oldObj, newObj client.Object) { + c.lock.RLock() + defer c.lock.RUnlock() + + if i, ok := c.informers[newObj.GetObjectKind().GroupVersionKind()]; ok { + i := i.(*informer) + i.lock.RLock() + defer i.lock.RUnlock() + + for _, h := range i.handlers { + h.OnUpdate(resourceGroup, oldObj, newObj) + } + } +} + +func (c *cache) informDelete(resourceGroup string, obj client.Object) { + c.lock.RLock() + defer c.lock.RUnlock() + + if i, ok := c.informers[obj.GetObjectKind().GroupVersionKind()]; ok { + i := i.(*informer) + i.lock.RLock() + defer i.lock.RUnlock() + + for _, h := range i.handlers { + h.OnDelete(resourceGroup, obj) + } + } +} + +func (c *cache) informSync(resourceGroup string, obj client.Object) { + c.lock.RLock() + defer c.lock.RUnlock() + + if i, ok := c.informers[obj.GetObjectKind().GroupVersionKind()]; ok { + i := i.(*informer) + i.lock.RLock() + defer i.lock.RUnlock() + + for _, h := range i.handlers { + h.OnGeneric(resourceGroup, obj) + } + } +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/sync.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/sync.go new file mode 100644 index 0000000000..1caa13e9f4 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/sync.go @@ -0,0 +1,196 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const lastSyncTimeAnnotation = "inmemory.infrastructure.cluster.x-k8s.io/last-sync" + +type resyncRequest struct { + resourceGroup string + gvk schema.GroupVersionKind + key types.NamespacedName +} + +func (c *cache) startSyncer(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx).WithValues("controller", "syncer") // TODO: consider whether to use something other than controller + ctx = ctrl.LoggerInto(ctx, log) + + log.Info("Starting syncer queue") + c.syncQueue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + go func() { + <-ctx.Done() + c.syncQueue.ShutDown() + }() + + // Note: written by the sync loop goroutine and read by the poll below, so it must be accessed atomically. + var syncLoopStarted int64 + go func() { + log.Info("Starting sync loop") + atomic.StoreInt64(&syncLoopStarted, 1) + for { + select { + case <-time.After(c.syncPeriod / 4): + c.syncGroup(ctx) + case <-ctx.Done(): + return + } + } + }() + + var workers int64 + go func() { + log.Info("Starting sync workers", "count", c.syncConcurrency) + wg := &sync.WaitGroup{} + wg.Add(c.syncConcurrency) + for i := 0; i < c.syncConcurrency; i++ { + go func() { + atomic.AddInt64(&workers, 1) + defer wg.Done() + for c.processSyncWorkItem(ctx) { + } + }() + } + <-ctx.Done() + wg.Wait() + }() + + if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { + if atomic.LoadInt64(&syncLoopStarted) == 0 { + return false, nil + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to start sync loop: %v", err) + } + + if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { + if atomic.LoadInt64(&workers) < int64(c.syncConcurrency) { + return false, nil + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to start sync workers: %v", err) + } + return nil +} + +func (c *cache) syncGroup(ctx context.Context) { + log := ctrl.LoggerFrom(ctx) + + c.lock.RLock() + defer c.lock.RUnlock() + i := 0 + for resourceGroup, tracker := range c.resourceGroups { + i += c.syncResourceGroupTracker(ctx, resourceGroup, tracker) + } + log.Info("Sync loop", "queuedResources", i) +} + +func (c *cache) syncResourceGroupTracker(_ context.Context, resourceGroup string, tracker *resourceGroupTracker) int { + tracker.lock.RLock() + defer tracker.lock.RUnlock() + + syncBeforeTime := time.Now().UTC().Add(-c.syncPeriod) + i := 0 + for gvk, objects := range tracker.objects { + for key, obj := range objects { + if lastSync, ok := lastSyncTimeAnnotationValue(obj); ok { + if lastSync.After(syncBeforeTime) { + continue + } + } + i++ + c.syncQueue.Add(resyncRequest{ + resourceGroup: resourceGroup, + gvk: gvk, + key: key, + }) + } + } + return i +} + +func (c *cache) processSyncWorkItem(ctx context.Context) bool { + log := ctrl.LoggerFrom(ctx) + + item, shutdown := c.syncQueue.Get() + if shutdown { + return false + } + + defer func() { + c.syncQueue.Forget(item) + c.syncQueue.Done(item) + }() + + rr, ok := item.(resyncRequest) + if !ok { + return true + } + + tracker := c.resourceGroupTracker(rr.resourceGroup) + if tracker == nil { + return true + } + + tracker.lock.Lock() + defer tracker.lock.Unlock() + + objects, ok := tracker.objects[rr.gvk] + if !ok { + return true + } + + obj, 
ok := objects[rr.key] + if !ok { + return true + } + + now := time.Now().UTC() + obj.SetAnnotations(appendAnnotations(obj, lastSyncTimeAnnotation, now.Format(time.RFC3339))) + tracker.objects[rr.gvk][rr.key] = obj + + log.V(4).Info("Object sync triggered", "resourceGroup", rr.resourceGroup, rr.gvk.Kind, rr.key) + c.informSync(rr.resourceGroup, obj) + return true +} + +func lastSyncTimeAnnotationValue(obj client.Object) (time.Time, bool) { + value, ok := obj.GetAnnotations()[lastSyncTimeAnnotation] + if !ok { + return time.Time{}, false + } + + valueTime, err := time.Parse(time.RFC3339, value) + if err != nil { + return time.Time{}, false + } + return valueTime, true +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/cache/sync_test.go b/test/infrastructure/tmp-to-be-deleted/runtime/cache/sync_test.go new file mode 100644 index 0000000000..c803631915 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/cache/sync_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "testing" + "time" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + cloudv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/api/v1alpha1" +) + +func Test_cache_sync(t *testing.T) { + g := NewWithT(t) + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + c := NewCache(scheme).(*cache) + c.syncPeriod = 5 * time.Second // force a shorter sync period + h := &fakeHandler{} + i, err := c.GetInformer(ctx, &cloudv1.CloudMachine{}) + g.Expect(err).ToNot(HaveOccurred()) + err = i.AddEventHandler(h) + g.Expect(err).ToNot(HaveOccurred()) + + err = c.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(c.started).To(BeTrue()) + + c.AddResourceGroup("foo") + + obj := &cloudv1.CloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + }, + } + err = c.Create("foo", obj) + g.Expect(err).ToNot(HaveOccurred()) + + objBefore := &cloudv1.CloudMachine{} + err = c.Get("foo", types.NamespacedName{Name: "baz"}, objBefore) + g.Expect(err).ToNot(HaveOccurred()) + + lastSyncBefore, ok := lastSyncTimeAnnotationValue(objBefore) + g.Expect(ok).To(BeTrue()) + + g.Eventually(func() bool { + objAfter := &cloudv1.CloudMachine{} + err = c.Get("foo", types.NamespacedName{Name: "baz"}, objAfter) + if err != nil { + return false + } + lastSyncAfter, ok := lastSyncTimeAnnotationValue(objAfter) + if !ok { + return false + } + if lastSyncBefore != lastSyncAfter { + return true + } + return false + }, 10*time.Second, 200*time.Millisecond).Should(BeTrue(), "object should be synced") + + g.Expect(h.Events()).To(ContainElement("foo, CloudMachine=baz, Synced")) + + cancel() +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/client/client.go b/test/infrastructure/tmp-to-be-deleted/runtime/client/client.go new file mode 100644 index 0000000000..c011f57a62 --- /dev/null +++ 
b/test/infrastructure/tmp-to-be-deleted/runtime/client/client.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Reader knows how to read and list resources in a resource group. +type Reader interface { + // Get retrieves a resource for the given object key. + Get(ctx context.Context, key client.ObjectKey, obj client.Object) error + + // List retrieves list of objects for a given namespace and list options. + List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error +} + +// Writer knows how to create, delete, and update resources in a resource group. +type Writer interface { + // Create saves a resource in a resource group. + Create(ctx context.Context, obj client.Object) error + + // Delete deletes a resource from a resource group. + Delete(ctx context.Context, obj client.Object) error + + // Update updates a resource in a resource group. + Update(ctx context.Context, obj client.Object) error + + // Patch patches a resource in a resource group. + Patch(ctx context.Context, obj client.Object, patch client.Patch) error +} + +// Client knows how to perform CRUD operations on resources in a resource group. +type Client interface { + Reader + Writer +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/client/doc.go b/test/infrastructure/tmp-to-be-deleted/runtime/client/doc.go new file mode 100644 index 0000000000..220f52b7cd --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/client/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package client defines Client for operating on resource groups. + +The implementation is derived from sigs.k8s.io/controller-runtime/pkg/client and the main difference is that +only interfaces are defined. +*/ +package client diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/doc.go b/test/infrastructure/tmp-to-be-deleted/runtime/doc.go new file mode 100644 index 0000000000..5ae88e5203 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/doc.go @@ -0,0 +1,31 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package runtime implements an in-memory runtime for handling objects grouped in resource groups, +similar to resource groups in Azure. + +In-memory objects are defined like Kubernetes objects and can be operated on with +a client inspired by the controller-runtime client; they also exhibit some behaviour +of real Kubernetes objects, e.g. garbage collection and owner references, +as well as informers to support watches. + +NOTE: We can't use controller-runtime directly for the following reasons: +* multi-cluster (we have resourceGroups to differentiate resources belonging to different clusters) +* data should be stored in-memory +* we would like objects in memory to behave like Kubernetes objects (garbage collection). +*/ +package runtime diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/manager/doc.go b/test/infrastructure/tmp-to-be-deleted/runtime/manager/doc.go new file mode 100644 index 0000000000..401228e7cc --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/manager/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package manager defines a Manager for resource groups. + +Resource groups are stored in an in-memory Cache, and their objects are accessed via a client. +*/ +package manager diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/manager/manager.go b/test/infrastructure/tmp-to-be-deleted/runtime/manager/manager.go new file mode 100644 index 0000000000..6c7fc1a4a4 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/manager/manager.go @@ -0,0 +1,102 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + + inmemorycache "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime/cache" + inmemoryresourcegroup "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup" +) + +// Manager initializes shared dependencies such as Caches and Clients.
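+// +// A minimal usage sketch (illustrative only; resource group and object names are arbitrary): +// +// m := New(scheme) +// _ = m.Start(ctx) +// m.AddResourceGroup("group1") +// c := m.GetResourceGroup("group1").GetClient() +// _ = c.Get(ctx, client.ObjectKey{Name: "m1"}, obj)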
+type Manager interface { + // TODO: refactor into resourcegroup Add/Delete/Get; make Delete fail if the resource group does not exist + AddResourceGroup(name string) + DeleteResourceGroup(name string) + GetResourceGroup(name string) inmemoryresourcegroup.ResourceGroup + + GetScheme() *runtime.Scheme + + // TODO: expose less (only get informers) + GetCache() inmemorycache.Cache + + Start(ctx context.Context) error +} + +var _ Manager = &manager{} + +type manager struct { + scheme *runtime.Scheme + + cache inmemorycache.Cache + started bool +} + +// New creates a new manager. +func New(scheme *runtime.Scheme) Manager { + m := &manager{ + scheme: scheme, + } + m.cache = inmemorycache.NewCache(scheme) + return m +} + +func (m *manager) AddResourceGroup(name string) { + m.cache.AddResourceGroup(name) +} + +func (m *manager) DeleteResourceGroup(name string) { + m.cache.DeleteResourceGroup(name) +} + +// GetResourceGroup returns a resource group which reads from the cache. +func (m *manager) GetResourceGroup(name string) inmemoryresourcegroup.ResourceGroup { + return inmemoryresourcegroup.NewResourceGroup(name, m.cache) +} + +func (m *manager) GetScheme() *runtime.Scheme { + return m.scheme +} + +func (m *manager) GetCache() inmemorycache.Cache { + return m.cache +} + +func (m *manager) Start(ctx context.Context) error { + if ctx == nil { + return fmt.Errorf("context cannot be nil") + } + + // Note: get the logger only after the nil check above. + log := ctrl.LoggerFrom(ctx) + + if m.started { + return fmt.Errorf("manager started more than once") + } + + if err := m.cache.Start(ctx); err != nil { + return fmt.Errorf("failed to start cache: %v", err) + } + + m.started = true + log.Info("Manager successfully started!") + return nil +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/cached_resourcegroup.go b/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/cached_resourcegroup.go new file mode 100644 index 0000000000..e32c9ad64c --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/cached_resourcegroup.go @@ -0,0 +1,79 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcegroup + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + + inmemorycache "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime/cache" + inmemoryclient "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime/client" +) + +var _ ResourceGroup = &cachedResourceGroup{} + +type cachedResourceGroup struct { + name string + cache inmemorycache.Cache +} + +// NewResourceGroup returns a new resource group. 
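+// The returned group holds no state of its own: its client scopes every call to the given name and delegates storage to the shared Cache.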
+func NewResourceGroup(name string, cache inmemorycache.Cache) ResourceGroup { + return &cachedResourceGroup{ + name: name, + cache: cache, + } +} + +func (cc *cachedResourceGroup) GetClient() inmemoryclient.Client { + return &cachedClient{ + resourceGroup: cc.name, + cache: cc.cache, + } +} + +var _ inmemoryclient.Client = &cachedClient{} + +type cachedClient struct { + resourceGroup string + cache inmemorycache.Cache +} + +func (c *cachedClient) Get(_ context.Context, key client.ObjectKey, obj client.Object) error { + return c.cache.Get(c.resourceGroup, key, obj) +} + +func (c *cachedClient) List(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { + return c.cache.List(c.resourceGroup, list, opts...) +} + +func (c *cachedClient) Create(_ context.Context, obj client.Object) error { + return c.cache.Create(c.resourceGroup, obj) +} + +func (c *cachedClient) Delete(_ context.Context, obj client.Object) error { + return c.cache.Delete(c.resourceGroup, obj) +} + +func (c *cachedClient) Update(_ context.Context, obj client.Object) error { + return c.cache.Update(c.resourceGroup, obj) +} + +func (c *cachedClient) Patch(_ context.Context, obj client.Object, patch client.Patch) error { + return c.cache.Patch(c.resourceGroup, obj, patch) +} diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/doc.go b/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/doc.go new file mode 100644 index 0000000000..9bdafd744d --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package resourcegroup defines a ResourceGroup; the default implementation is backed by a cache. +*/ +package resourcegroup diff --git a/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/resourcegroup.go b/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/resourcegroup.go new file mode 100644 index 0000000000..8fa1e34011 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/runtime/resourcegroup/resourcegroup.go @@ -0,0 +1,26 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcegroup + +import ( + inmemoryclient "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime/client" +) + +// ResourceGroup groups resources for a workload cluster. 
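+// Consumers are expected to interact with a resource group only through its Client, which offers controller-runtime style CRUD operations scoped to the group (see the client package for the interface definitions).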
+type ResourceGroup interface { + GetClient() inmemoryclient.Client +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/const.go b/test/infrastructure/tmp-to-be-deleted/server/api/const.go new file mode 100644 index 0000000000..98a18e97bf --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/api/const.go @@ -0,0 +1,301 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +var ( + // apiVersions is the value returned by the /api discovery call. + // Note: This must contain all APIs required by CAPI. + apiVersions = &metav1.APIVersions{ + Versions: []string{"v1"}, + } + + // corev1APIResourceList is the value returned by the /api/v1 discovery call. + // Note: This must contain all APIs required by CAPI. + corev1APIResourceList = &metav1.APIResourceList{ + GroupVersion: "v1", + APIResources: []metav1.APIResource{ + { + Name: "configmaps", + SingularName: "", + Namespaced: true, + Kind: "ConfigMap", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + ShortNames: []string{ + "cm", + }, + StorageVersionHash: "", + }, + { + Name: "endpoints", + SingularName: "", + Namespaced: true, + Kind: "Endpoints", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + ShortNames: []string{ + "ep", + }, + StorageVersionHash: "", + }, + { + Name: "nodes", + SingularName: "", + Namespaced: false, + Kind: "Node", + Verbs: []string{ + "get", + "list", + "watch", + }, + ShortNames: []string{ + "no", + }, + StorageVersionHash: "", + }, + { + Name: "pods", + SingularName: "", + Namespaced: true, + Kind: "Pod", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + ShortNames: []string{ + "po", + }, + StorageVersionHash: "", + }, + { + Name: "secrets", + SingularName: "", + Namespaced: true, + Kind: "Secret", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + StorageVersionHash: "", + }, + { + Name: "services", + SingularName: "", + Namespaced: true, + Kind: "Service", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + ShortNames: []string{ + "svc", + }, + StorageVersionHash: "", + }, + }, + } + + // apiGroupList is the value returned by the /apis discovery call. + // Note: This must contain all APIs required by CAPI. 
+ apiGroupList = &metav1.APIGroupList{ + Groups: []metav1.APIGroup{ + { + Name: "rbac.authorization.k8s.io", + Versions: []metav1.GroupVersionForDiscovery{ + { + GroupVersion: "rbac.authorization.k8s.io/v1", + Version: "v1", + }, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: "rbac.authorization.k8s.io/v1", + Version: "v1", + }, + }, + { + Name: "apps", + Versions: []metav1.GroupVersionForDiscovery{ + { + GroupVersion: "apps/v1", + Version: "v1", + }, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: "apps/v1", + Version: "v1", + }, + }, + }, + } + + // rbacv1APIResourceList is the value returned by the /apis/rbac.authorization.k8s.io/v1 discovery call. + // Note: This must contain all APIs required by CAPI. + rbacv1APIResourceList = &metav1.APIResourceList{ + GroupVersion: "rbac.authorization.k8s.io/v1", + APIResources: []metav1.APIResource{ + { + Name: "clusterrolebindings", + SingularName: "", + Namespaced: false, + Kind: "ClusterRoleBinding", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + StorageVersionHash: "", + }, + { + Name: "clusterroles", + SingularName: "", + Namespaced: false, + Kind: "ClusterRole", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + StorageVersionHash: "", + }, + { + Name: "rolebindings", + SingularName: "", + Namespaced: true, + Kind: "RoleBinding", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + StorageVersionHash: "", + }, + { + Name: "roles", + SingularName: "", + Namespaced: true, + Kind: "Role", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + StorageVersionHash: "", + }, + }, + } + + // appsV1ResourceList is the value returned by the /apis/apps/v1 discovery call. + // Note: This must contain all APIs required by CAPI. + appsV1ResourceList = &metav1.APIResourceList{ + GroupVersion: "apps/v1", + APIResources: []metav1.APIResource{ + { + Name: "daemonsets", + SingularName: "daemonset", + Namespaced: true, + Kind: "DaemonSet", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + ShortNames: []string{ + "ds", + }, + StorageVersionHash: "", + }, + { + Name: "deployments", + SingularName: "deployment", + Namespaced: true, + Kind: "Deployment", + Verbs: []string{ + "create", + "delete", + "deletecollection", + "get", + "list", + "patch", + "update", + "watch", + }, + ShortNames: []string{ + "deploy", + }, + StorageVersionHash: "", + }, + }, + } +) diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/debug.go b/test/infrastructure/tmp-to-be-deleted/server/api/debug.go new file mode 100644 index 0000000000..a852b99dd9 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/api/debug.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api + +import ( + "net/http" + + "github.com/emicklei/go-restful/v3" + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" +) + +// DebugInfoProvider defines the methods the server must implement +// to provide debug info. +type DebugInfoProvider interface { + ListListeners() map[string]string +} + +// NewDebugHandler returns an http.Handler for debugging the server. +func NewDebugHandler(manager inmemoryruntime.Manager, log logr.Logger, infoProvider DebugInfoProvider) http.Handler { + debugServer := &debugHandler{ + container: restful.NewContainer(), + manager: manager, + log: log, + infoProvider: infoProvider, + } + + ws := new(restful.WebService) + ws.Produces(runtime.ContentTypeJSON) + + // Discovery endpoints + ws.Route(ws.GET("/listeners").To(debugServer.listenersList)) + + debugServer.container.Add(ws) + + return debugServer +} + +type debugHandler struct { + container *restful.Container + manager inmemoryruntime.Manager + log logr.Logger + infoProvider DebugInfoProvider +} + +func (h *debugHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.container.ServeHTTP(w, r) +} + +func (h *debugHandler) listenersList(_ *restful.Request, resp *restful.Response) { + listeners := h.infoProvider.ListListeners() + + if err := resp.WriteEntity(listeners); err != nil { + _ = resp.WriteErrorString(http.StatusInternalServerError, err.Error()) + return + } +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/doc.go b/test/infrastructure/tmp-to-be-deleted/server/api/doc.go new file mode 100644 index 0000000000..12af86a30c --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/api/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package api defines a set of Handlers to be used for implementing a fake API server, designed specifically +to make Cluster API and the Kubeadm Control Plane provider happy during a stress test (it is not complete or +production ready, and it will never be). +*/ +package api diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/handler.go b/test/infrastructure/tmp-to-be-deleted/server/api/handler.go new file mode 100644 index 0000000000..7da9a2bcbc --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/api/handler.go @@ -0,0 +1,674 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/emicklei/go-restful/v3" + "github.com/go-logr/logr" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/tools/portforward" + "sigs.k8s.io/controller-runtime/pkg/client" + + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" + inmemoryportforward "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server/api/portforward" +) + +// ResourceGroupResolver defines a func that can identify which workloadCluster/resourceGroup a +// request targets. +type ResourceGroupResolver func(host string) (string, error) + +// NewAPIServerHandler returns an http.Handler for a fake API server. +func NewAPIServerHandler(manager inmemoryruntime.Manager, log logr.Logger, resolver ResourceGroupResolver) http.Handler { + apiServer := &apiServerHandler{ + container: restful.NewContainer(), + manager: manager, + log: log, + resourceGroupResolver: resolver, + requestInfoResolver: server.NewRequestInfoResolver(&server.Config{ + LegacyAPIGroupPrefixes: sets.NewString(server.DefaultLegacyAPIPrefix), + }), + } + + apiServer.container.Filter(apiServer.globalLogging) + + ws := new(restful.WebService) + ws.Consumes(runtime.ContentTypeJSON) + ws.Produces(runtime.ContentTypeJSON) + + // Health check + ws.Route(ws.GET("/").To(apiServer.healthz)) + + // Discovery endpoints + ws.Route(ws.GET("/api").To(apiServer.apiDiscovery)) + ws.Route(ws.GET("/api/v1").To(apiServer.apiV1Discovery)) + ws.Route(ws.GET("/apis").To(apiServer.apisDiscovery)) + ws.Route(ws.GET("/apis/{group}/{version}").To(apiServer.apisDiscovery)) + + // CRUD endpoints (global objects) + ws.Route(ws.POST("/api/v1/{resource}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Create)) + ws.Route(ws.GET("/api/v1/{resource}").If(isList).To(apiServer.apiV1List)) + ws.Route(ws.GET("/api/v1/{resource}").If(isWatch).To(apiServer.apiV1Watch)) + ws.Route(ws.GET("/api/v1/{resource}/{name}").To(apiServer.apiV1Get)) + ws.Route(ws.PUT("/api/v1/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Update)) + ws.Route(ws.PATCH("/api/v1/{resource}/{name}").Consumes(string(types.MergePatchType), string(types.StrategicMergePatchType)).To(apiServer.apiV1Patch)) + ws.Route(ws.DELETE("/api/v1/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf, runtime.ContentTypeJSON).To(apiServer.apiV1Delete)) + + ws.Route(ws.POST("/apis/{group}/{version}/{resource}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Create)) + ws.Route(ws.GET("/apis/{group}/{version}/{resource}").If(isList).To(apiServer.apiV1List)) + ws.Route(ws.GET("/apis/{group}/{version}/{resource}").If(isWatch).To(apiServer.apiV1Watch)) + ws.Route(ws.GET("/apis/{group}/{version}/{resource}/{name}").To(apiServer.apiV1Get)) + 
ws.Route(ws.PUT("/apis/{group}/{version}/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Update)) + ws.Route(ws.PATCH("/apis/{group}/{version}/{resource}/{name}").Consumes(string(types.MergePatchType), string(types.StrategicMergePatchType)).To(apiServer.apiV1Patch)) + ws.Route(ws.DELETE("/apis/{group}/{version}/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf, runtime.ContentTypeJSON).To(apiServer.apiV1Delete)) + + // CRUD endpoints (namespaced objects) + ws.Route(ws.POST("/api/v1/namespaces/{namespace}/{resource}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Create)) + ws.Route(ws.GET("/api/v1/namespaces/{namespace}/{resource}").If(isList).To(apiServer.apiV1List)) + ws.Route(ws.GET("/api/v1/namespaces/{namespace}/{resource}").If(isWatch).To(apiServer.apiV1Watch)) + ws.Route(ws.GET("/api/v1/namespaces/{namespace}/{resource}/{name}").To(apiServer.apiV1Get)) + ws.Route(ws.PUT("/api/v1/namespaces/{namespace}/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Update)) + ws.Route(ws.PATCH("/api/v1/namespaces/{namespace}/{resource}/{name}").Consumes(string(types.MergePatchType), string(types.StrategicMergePatchType)).To(apiServer.apiV1Patch)) + ws.Route(ws.DELETE("/api/v1/namespaces/{namespace}/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf, runtime.ContentTypeJSON).To(apiServer.apiV1Delete)) + + ws.Route(ws.POST("/apis/{group}/{version}/namespaces/{namespace}/{resource}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Create)) + ws.Route(ws.GET("/apis/{group}/{version}/namespaces/{namespace}/{resource}").If(isList).To(apiServer.apiV1List)) + ws.Route(ws.GET("/apis/{group}/{version}/namespaces/{namespace}/{resource}").If(isWatch).To(apiServer.apiV1Watch)) + ws.Route(ws.GET("/apis/{group}/{version}/namespaces/{namespace}/{resource}/{name}").To(apiServer.apiV1Get)) + ws.Route(ws.PUT("/apis/{group}/{version}/namespaces/{namespace}/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf).To(apiServer.apiV1Update)) + ws.Route(ws.PATCH("/apis/{group}/{version}/namespaces/{namespace}/{resource}/{name}").Consumes(string(types.MergePatchType), string(types.StrategicMergePatchType)).To(apiServer.apiV1Patch)) + ws.Route(ws.DELETE("/apis/{group}/{version}/namespaces/{namespace}/{resource}/{name}").Consumes(runtime.ContentTypeProtobuf, runtime.ContentTypeJSON).To(apiServer.apiV1Delete)) + + // Port forward endpoints + ws.Route(ws.GET("/api/v1/namespaces/{namespace}/pods/{name}/portforward").To(apiServer.apiV1PortForward)) + ws.Route(ws.POST("/api/v1/namespaces/{namespace}/pods/{name}/portforward").Consumes("*/*").To(apiServer.apiV1PortForward)) + + apiServer.container.Add(ws) + + return apiServer +} + +type apiServerHandler struct { + container *restful.Container + manager inmemoryruntime.Manager + log logr.Logger + resourceGroupResolver ResourceGroupResolver + requestInfoResolver *request.RequestInfoFactory +} + +func (h *apiServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.container.ServeHTTP(w, r) +} + +func (h *apiServerHandler) globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { + h.log.V(4).Info("Serving", "method", req.Request.Method, "url", req.Request.URL, "contentType", req.HeaderParameter("Content-Type")) + + start := time.Now() + + defer func() { + // Note: The following is based on k8s.io/apiserver/pkg/endpoints/metrics.MonitorRequest + requestInfo, err := h.requestInfoResolver.NewRequestInfo(req.Request) + if err != nil { + h.log.Error(err, 
"Couldn't get RequestInfo from request", "url", req.Request.URL) + requestInfo = &request.RequestInfo{Verb: req.Request.Method, Path: req.Request.URL.Path} + } + + // Base label values which are also available in upstream kube-apiserver metrics. + dryRun := cleanDryRun(req.Request.URL) + scope := metrics.CleanScope(requestInfo) + verb := metrics.CleanVerb(metrics.CanonicalVerb(strings.ToUpper(req.Request.Method), scope), req.Request, requestInfo) + component := metrics.APIServerComponent + baseLabelValues := []string{verb, dryRun, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component} + requestTotalLabelValues := append(baseLabelValues, strconv.Itoa(resp.StatusCode())) + requestLatencyLabelValues := baseLabelValues + + // Additional CAPIM specific label values. + wclName, _ := h.resourceGroupResolver(req.Request.Host) + userAgent := req.Request.Header.Get("User-Agent") + requestTotalLabelValues = append(requestTotalLabelValues, req.Request.Method, req.Request.Host, req.SelectedRoutePath(), wclName, userAgent) + requestLatencyLabelValues = append(requestLatencyLabelValues, req.Request.Method, req.Request.Host, req.SelectedRoutePath(), wclName, userAgent) + + requestTotal.WithLabelValues(requestTotalLabelValues...).Inc() + requestLatency.WithLabelValues(requestLatencyLabelValues...).Observe(time.Since(start).Seconds()) + }() + + chain.ProcessFilter(req, resp) +} + +// cleanDryRun gets dryrun from a URL. +// Note: This is a copy of k8s.io/apiserver/pkg/endpoints/metrics.cleanDryRun. +func cleanDryRun(u *url.URL) string { + // avoid allocating when we don't see dryRun in the query + if !strings.Contains(u.RawQuery, "dryRun") { + return "" + } + dryRun := u.Query()["dryRun"] + if errs := validation.ValidateDryRun(nil, dryRun); len(errs) > 0 { + return "invalid" + } + // Since dryRun could be valid with any arbitrarily long length + // we have to dedup and sort the elements before joining them together + // TODO: this is a fairly large allocation for what it does, consider + // a sort and dedup in a single pass + return strings.Join(sets.NewString(dryRun...).List(), ",") +} + +func (h *apiServerHandler) apiDiscovery(_ *restful.Request, resp *restful.Response) { + if err := resp.WriteEntity(apiVersions); err != nil { + _ = resp.WriteErrorString(http.StatusInternalServerError, err.Error()) + return + } +} + +func (h *apiServerHandler) apiV1Discovery(_ *restful.Request, resp *restful.Response) { + if err := resp.WriteEntity(corev1APIResourceList); err != nil { + _ = resp.WriteErrorString(http.StatusInternalServerError, err.Error()) + return + } +} + +func (h *apiServerHandler) apisDiscovery(req *restful.Request, resp *restful.Response) { + if req.PathParameter("group") != "" { + if req.PathParameter("group") == "rbac.authorization.k8s.io" && req.PathParameter("version") == "v1" { + if err := resp.WriteEntity(rbacv1APIResourceList); err != nil { + _ = resp.WriteErrorString(http.StatusInternalServerError, err.Error()) + return + } + return + } + if req.PathParameter("group") == "apps" && req.PathParameter("version") == "v1" { + if err := resp.WriteEntity(appsV1ResourceList); err != nil { + _ = resp.WriteErrorString(http.StatusInternalServerError, err.Error()) + return + } + return + } + + _ = resp.WriteErrorString(http.StatusInternalServerError, fmt.Sprintf("discovery info not defined for %s/%s", req.PathParameter("group"), req.PathParameter("version"))) + return + } + + if err := resp.WriteEntity(apiGroupList); err != nil { + _ = 
resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1Create(req *restful.Request, resp *restful.Response) {
+	ctx := req.Request.Context()
+
+	// Gets the resource group the request targets (the resolver is aware of the mapping host<->resourceGroup)
+	resourceGroup, err := h.resourceGroupResolver(req.Request.Host)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets a client to the resource group.
+	inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient()
+
+	// Maps the requested resource to a gvk.
+	gvk, err := requestToGVK(req)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets the obj from the request.
+	defer func() { _ = req.Request.Body.Close() }()
+	// TODO: should we really ignore this error?
+	objData, _ := io.ReadAll(req.Request.Body)
+
+	newObj, err := h.manager.GetScheme().New(*gvk)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	codecFactory := serializer.NewCodecFactory(h.manager.GetScheme())
+	if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), objData, newObj); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Create the object
+	obj := newObj.(client.Object)
+	// TODO: consider check vs enforce for namespace on the object - namespace on the request path
+	obj.SetNamespace(req.PathParameter("namespace"))
+	if err := inmemoryClient.Create(ctx, obj); err != nil {
+		if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) {
+			_ = resp.WriteHeaderAndEntity(int(status.Status().Code), status)
+			return
+		}
+		_ = resp.WriteHeaderAndEntity(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if err := resp.WriteEntity(obj); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1List(req *restful.Request, resp *restful.Response) {
+	ctx := req.Request.Context()
+
+	// Gets the resource group the request targets (the resolver is aware of the mapping host<->resourceGroup)
+	resourceGroup, err := h.resourceGroupResolver(req.Request.Host)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets a client to the resource group.
+	inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient()
+
+	// Maps the requested resource to a gvk.
+	gvk, err := requestToGVK(req)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Reads and returns the requested data.
+	list := &unstructured.UnstructuredList{}
+	list.SetAPIVersion(gvk.GroupVersion().String())
+	list.SetKind(fmt.Sprintf("%sList", gvk.Kind))
+
+	listOpts := []client.ListOption{}
+	if req.PathParameter("namespace") != "" {
+		listOpts = append(listOpts, client.InNamespace(req.PathParameter("namespace")))
+	}
+
+	// TODO: The only field selector which works is for `spec.nodeName` on pods.
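+	// For illustration, the field selector handling below is meant to serve client-side list
+	// calls such as the following sketch (the names are hypothetical, not part of this patch):
+	//
+	//	pods := &corev1.PodList{}
+	//	err := c.List(ctx, pods, client.MatchingFields{"spec.nodeName": "machine-1"})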
+	selector, err := fields.ParseSelector(req.QueryParameter("fieldSelector"))
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if selector != nil {
+		listOpts = append(listOpts, client.MatchingFieldsSelector{Selector: selector})
+	}
+
+	if err := inmemoryClient.List(ctx, list, listOpts...); err != nil {
+		if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) {
+			_ = resp.WriteHeaderAndEntity(int(status.Status().Code), status)
+			return
+		}
+		_ = resp.WriteHeaderAndEntity(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if err := resp.WriteEntity(list); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1Watch(req *restful.Request, resp *restful.Response) {
+	// Gets the resource group the request targets (the resolver is aware of the mapping host<->resourceGroup)
+	resourceGroup, err := h.resourceGroupResolver(req.Request.Host)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Maps the requested resource to a gvk.
+	gvk, err := requestToGVK(req)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// If the request is a Watch, handle it using watchForResource.
+	err = h.watchForResource(req, resp, resourceGroup, *gvk)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1Get(req *restful.Request, resp *restful.Response) {
+	ctx := req.Request.Context()
+
+	// Gets the resource group the request targets (the resolver is aware of the mapping host<->resourceGroup)
+	resourceGroup, err := h.resourceGroupResolver(req.Request.Host)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets a client to the resource group.
+	inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient()
+
+	// Maps the requested resource to a gvk.
+	gvk, err := requestToGVK(req)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Reads and returns the requested data.
+	obj := &unstructured.Unstructured{}
+	obj.SetAPIVersion(gvk.GroupVersion().String())
+	obj.SetKind(gvk.Kind)
+	obj.SetName(req.PathParameter("name"))
+	obj.SetNamespace(req.PathParameter("namespace"))
+
+	if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+		if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) {
+			_ = resp.WriteHeaderAndEntity(int(status.Status().Code), status)
+			return
+		}
+		_ = resp.WriteHeaderAndEntity(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if err := resp.WriteEntity(obj); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1Update(req *restful.Request, resp *restful.Response) {
+	ctx := req.Request.Context()
+
+	// Gets the resource group the request targets (the resolver is aware of the mapping host<->resourceGroup)
+	resourceGroup, err := h.resourceGroupResolver(req.Request.Host)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets a client to the resource group.
+	inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient()
+
+	// Maps the requested resource to a gvk.
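+	// For example, a request for resource "deployments" under /apis/apps/v1 maps to the
+	// GroupVersionKind apps/v1, Kind=Deployment via the static APIResourceLists defined in this package.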
+	gvk, err := requestToGVK(req)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets the obj from the request.
+	defer func() { _ = req.Request.Body.Close() }()
+	// TODO: should we really ignore this error?
+	objData, _ := io.ReadAll(req.Request.Body)
+
+	newObj, err := h.manager.GetScheme().New(*gvk)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	codecFactory := serializer.NewCodecFactory(h.manager.GetScheme())
+	if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), objData, newObj); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Update the object
+	obj := newObj.(client.Object)
+	// TODO: consider check vs enforce for namespace on the object - namespace on the request path
+	obj.SetNamespace(req.PathParameter("namespace"))
+	if err := inmemoryClient.Update(ctx, obj); err != nil {
+		if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) {
+			_ = resp.WriteHeaderAndEntity(int(status.Status().Code), status)
+			return
+		}
+		_ = resp.WriteHeaderAndEntity(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if err := resp.WriteEntity(obj); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1Patch(req *restful.Request, resp *restful.Response) {
+	ctx := req.Request.Context()
+
+	// Gets the resource group the request targets (the resolver is aware of the mapping host<->resourceGroup)
+	resourceGroup, err := h.resourceGroupResolver(req.Request.Host)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets a client to the resource group.
+	inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient()
+
+	// Maps the requested resource to a gvk.
+	gvk, err := requestToGVK(req)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets the patch from the request.
+	defer func() { _ = req.Request.Body.Close() }()
+	// TODO: should we really ignore this error?
+	patchData, _ := io.ReadAll(req.Request.Body)
+	patchType := types.PatchType(req.HeaderParameter("Content-Type"))
+	patch := client.RawPatch(patchType, patchData)
+
+	// Apply the Patch.
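+	// For illustration, this serves client-side calls such as the following sketch
+	// (not part of this patch):
+	//
+	//	patch := client.RawPatch(types.MergePatchType, []byte(`{"metadata":{"labels":{"foo":"bar"}}}`))
+	//	err := c.Patch(ctx, obj, patch)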
+	obj := &unstructured.Unstructured{}
+	// TODO: consider check vs enforce for gvk on the object - gvk on the request path (same for name/namespace)
+	obj.SetAPIVersion(gvk.GroupVersion().String())
+	obj.SetKind(gvk.Kind)
+	obj.SetName(req.PathParameter("name"))
+	obj.SetNamespace(req.PathParameter("namespace"))
+
+	if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if err := inmemoryClient.Patch(ctx, obj, patch); err != nil {
+		if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) {
+			_ = resp.WriteHeaderAndEntity(int(status.Status().Code), status)
+			return
+		}
+		_ = resp.WriteHeaderAndEntity(http.StatusInternalServerError, err.Error())
+		return
+	}
+	if err := resp.WriteEntity(obj); err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1Delete(req *restful.Request, resp *restful.Response) {
+	ctx := req.Request.Context()
+
+	// Gets the resource group the request targets (the resolver is aware of the mapping host<->resourceGroup)
+	resourceGroup, err := h.resourceGroupResolver(req.Request.Host)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Gets a client to the resource group.
+	inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient()
+
+	// Maps the requested resource to a gvk.
+	gvk, err := requestToGVK(req)
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Deletes the requested object.
+	obj := &unstructured.Unstructured{}
+	obj.SetAPIVersion(gvk.GroupVersion().String())
+	obj.SetKind(gvk.Kind)
+	obj.SetName(req.PathParameter("name"))
+	obj.SetNamespace(req.PathParameter("namespace"))
+
+	if err := inmemoryClient.Delete(ctx, obj); err != nil {
+		if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) {
+			_ = resp.WriteHeaderAndEntity(int(status.Status().Code), status)
+			return
+		}
+		_ = resp.WriteHeaderAndEntity(http.StatusInternalServerError, err.Error())
+		return
+	}
+}
+
+func (h *apiServerHandler) apiV1PortForward(req *restful.Request, resp *restful.Response) {
+	// In order to handle a port forward request the current connection has to be upgraded
+	// to become compliant with the SPDY protocol.
+	// This implies two steps:
+	// - Adding support for handling multiple http streams, used for subsequent operations over
+	//   the forwarded connection.
+	// - Opening a connection to the target endpoint, the endpoint to port forward to, and setting up
+	//   a bidirectional copy of data because the server acts as a man in the middle.
+
+	podName := req.PathParameter("name")
+	podNamespace := req.PathParameter("namespace")
+
+	// Perform a sub protocol negotiation, ensuring that client and server agree on how
+	// to handle communications over the port forwarded connection.
+	request := req.Request
+	respWriter := resp.ResponseWriter
+	_, err := httpstream.Handshake(request, respWriter, []string{portforward.PortForwardProtocolV1Name})
+	if err != nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+		return
+	}
+
+	// Create a channel to handle http streams that will be generated for each subsequent
+	// operation over the port forwarded connection.
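+	// Note: a typical initiator is a port-forward based client, e.g. the etcd client used by the
+	// Kubeadm Control Plane provider, which opens one error stream and one data stream per operation.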
+	streamChan := make(chan httpstream.Stream, 1)
+
+	// Upgrade the connection specifying what to do when a new http stream is received.
+	// After being received, the new stream will be published into the stream channel for handling.
+	upgrader := spdy.NewResponseUpgrader()
+	conn := upgrader.UpgradeResponse(respWriter, request, inmemoryportforward.HTTPStreamReceived(streamChan))
+	if conn == nil {
+		_ = resp.WriteErrorString(http.StatusInternalServerError, "failed to get upgraded connection")
+		return
+	}
+	defer func() {
+		_ = conn.Close()
+	}()
+	conn.SetIdleTimeout(10 * time.Minute)
+
+	// Start the process handling streams that are published in the stream channel. Please note that:
+	// - The connection with the target will be established only when the first operation is executed.
+	// - Following operations will re-use the same connection.
+	streamHandler := inmemoryportforward.NewHTTPStreamHandler(
+		conn,
+		streamChan,
+		podName,
+		podNamespace,
+		func(ctx context.Context, podName, podNamespace, _ string, stream io.ReadWriteCloser) error {
+			// Given that in the in-memory provider there is no real infrastructure, and thus no real workload cluster,
+			// we are going to forward all the connections back to the same server (the CAPIM controller pod).
+			return h.doPortForward(ctx, req.Request.Host, stream)
+		},
+	)
+
+	// TODO: Consider using req.Request.Context()
+	streamHandler.Run(context.Background())
+}
+
+// doPortForward establishes a connection to the target of the port forward operation, and sets up
+// a bidirectional copy of data.
+// In the case of this provider, the target endpoint is always on the same server (the CAPIM controller pod).
+func (h *apiServerHandler) doPortForward(ctx context.Context, address string, stream io.ReadWriteCloser) error {
+	// Get a connection to the target of the port forward operation.
+	dial, err := net.Dial("tcp", address)
+	if err != nil {
+		return fmt.Errorf("failed to dial %q: %w", address, err)
+	}
+	defer func() {
+		_ = dial.Close()
+	}()
+
+	// Create a tunnel for bidirectional copy of data between the stream
+	// originated from the initiator of the port forward operation and the target.
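+	// Note: HTTPStreamTunnel only returns when the context is done or when one side of the
+	// bidirectional copy reports an error, so the connection stays open for the whole operation.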
+	return inmemoryportforward.HTTPStreamTunnel(ctx, stream, dial)
+}
+
+func (h *apiServerHandler) healthz(_ *restful.Request, resp *restful.Response) {
+	resp.WriteHeader(http.StatusOK)
+}
+
+func requestToGVK(req *restful.Request) (*schema.GroupVersionKind, error) {
+	resourceList := getAPIResourceList(req)
+	if resourceList == nil {
+		return nil, fmt.Errorf("no APIResourceList defined for %s", req.PathParameters())
+	}
+	gv, err := schema.ParseGroupVersion(resourceList.GroupVersion)
+	if err != nil {
+		return nil, fmt.Errorf("invalid group version in APIResourceList: %s", resourceList.GroupVersion)
+	}
+
+	resource := req.PathParameter("resource")
+	for _, r := range resourceList.APIResources {
+		if r.Name == resource {
+			gvk := gv.WithKind(r.Kind)
+			return &gvk, nil
+		}
+	}
+	return nil, fmt.Errorf("resource %s is not defined in the APIResourceList for %s", resource, req.PathParameters())
+}
+
+func getAPIResourceList(req *restful.Request) *metav1.APIResourceList {
+	if req.PathParameter("group") != "" {
+		if req.PathParameter("group") == "rbac.authorization.k8s.io" && req.PathParameter("version") == "v1" {
+			return rbacv1APIResourceList
+		}
+		if req.PathParameter("group") == "apps" && req.PathParameter("version") == "v1" {
+			return appsV1ResourceList
+		}
+		return nil
+	}
+	return corev1APIResourceList
+}
+
+// isWatch is true if the request contains `watch="true"` as a query parameter.
+func isWatch(req *http.Request) bool {
+	return req.URL.Query().Get("watch") == "true"
+}
+
+// isList is true if the request does not have `watch="true"` as a query parameter.
+func isList(req *http.Request) bool {
+	return !isWatch(req)
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/metrics.go b/test/infrastructure/tmp-to-be-deleted/server/api/metrics.go
new file mode 100644
index 0000000000..f1ec3b41cf
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/api/metrics.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
+)
+
+func init() {
+	// Register the metrics at the controller-runtime metrics registry.
+	ctrlmetrics.Registry.MustRegister(requestTotal)
+	ctrlmetrics.Registry.MustRegister(requestLatency)
+}
+
+var (
+	// requestTotal reports request results.
+	requestTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "capim_apiserver_request_total",
+		Help: "Number of HTTP requests",
+	}, []string{
+		// Labels from the kube-apiserver apiserver_request_total metric.
+		"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "code",
+		// Additional CAPIM labels.
+ "method", "host", "path", "cluster_name", "user_agent", + }) + + requestLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "capim_apiserver_request_duration_seconds", + Help: "Request latency in seconds.", + Buckets: []float64{0.005, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3, + 4, 5, 6, 8, 10, 15, 20, 30, 45, 60}, + }, []string{ + // Labels from the kube-apiserver apiserver_request_total metric. + "verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", + // Additional CAPIM labels. + "method", "host", "path", "cluster_name", "user_agent", + }) +) diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/portforward/doc.go b/test/infrastructure/tmp-to-be-deleted/server/api/portforward/doc.go new file mode 100644 index 0000000000..8a5375ec5a --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/api/portforward/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package portforward implements support for implementing a fake port forward service in the api. + +The implementation is heavily inspired from https://github.com/kubernetes-sigs/kwok/blob/main/pkg/kwok/server/portforward/httpstream.go +(kudos to the team there!) +*/ +package portforward diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/portforward/httpstreams.go b/test/infrastructure/tmp-to-be-deleted/server/api/portforward/httpstreams.go new file mode 100644 index 0000000000..2ded9e663e --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/api/portforward/httpstreams.go @@ -0,0 +1,321 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package portforward + +import ( + "context" + "fmt" + "io" + "strconv" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog/v2" +) + +// HTTPStreamReceived is the httpstream.NewStreamHandler for port +// forward streams. Each valid stream is sent to the streams channel. 
+func HTTPStreamReceived(streamsCh chan httpstream.Stream) func(httpstream.Stream, <-chan struct{}) error {
+	return func(stream httpstream.Stream, replySent <-chan struct{}) error {
+		// make sure it has a valid stream type header
+		streamType := stream.Headers().Get(corev1.StreamType)
+		if streamType == "" {
+			return fmt.Errorf("%q header is required", corev1.StreamType)
+		}
+		if streamType != corev1.StreamTypeError && streamType != corev1.StreamTypeData {
+			return fmt.Errorf("invalid stream type %q", streamType)
+		}
+
+		streamsCh <- stream
+		return nil
+	}
+}
+
+// NewHTTPStreamHandler returns a new httpStreamHandler capable of processing multiple port forward
+// operations over a single httpstream.Connection.
+func NewHTTPStreamHandler(conn httpstream.Connection, streamsCh chan httpstream.Stream, podName, podNamespace string, forwarder PortForwarder) HTTPStreamHandler {
+	return &httpStreamHandler{
+		conn:                  conn,
+		streamChan:            streamsCh,
+		streamPairs:           make(map[string]*httpStreamPair),
+		streamCreationTimeout: 30 * time.Second,
+		podName:               podName,
+		podNamespace:          podNamespace,
+		forwarder:             forwarder,
+	}
+}
+
+// HTTPStreamHandler is capable of processing multiple port forward
+// requests over a single httpstream.Connection.
+type HTTPStreamHandler interface {
+	Run(ctx context.Context)
+}
+
+// httpStreamHandler is capable of processing multiple port forward
+// requests over a single httpstream.Connection.
+type httpStreamHandler struct {
+	// TODO: consider setting log.
+	log                   logr.Logger
+	conn                  httpstream.Connection
+	streamChan            chan httpstream.Stream
+	streamPairsLock       sync.RWMutex
+	streamPairs           map[string]*httpStreamPair
+	streamCreationTimeout time.Duration
+	podName               string
+	podNamespace          string
+	forwarder             PortForwarder
+}
+
+// PortForwarder knows how to forward content from a data stream to/from a target (usually a port in a pod).
+type PortForwarder func(ctx context.Context, podName, podNamespace, port string, stream io.ReadWriteCloser) error
+
+// Run is the main loop for the HTTPStreamHandler. It processes new
+// streams, invoking portForward for each complete stream pair. The loop exits
+// when the httpstream.Connection is closed.
+//
+// Notes:
+// - there are two streams for each operation over the port forward connection, the data stream
+//   and the error stream; both streams can be identified by using the requestID.
+// - it is required to wait for both streams before starting the actual port forward.
+// - stream pairs are kept around until the operation completes.
+func (h *httpStreamHandler) Run(ctx context.Context) { + h.log.V(4).Info("Port-forward: connection waiting for streams", "Pod", klog.KRef(h.podNamespace, h.podName)) +Loop: + for { + select { + case <-h.conn.CloseChan(): + h.log.V(4).Info("Port-forward: connection closed", "Pod", klog.KRef(h.podNamespace, h.podName)) + break Loop + case stream := <-h.streamChan: + requestID := h.requestID(stream) + streamType := stream.Headers().Get(corev1.StreamType) + h.log.V(4).Info("Port-forward: connection request received new type of stream", "Pod", klog.KRef(h.podNamespace, h.podName), "request", requestID, "streamType", streamType) + + p, created := h.getStreamPair(requestID) + if created { + go h.monitorStreamPair(p, time.After(h.streamCreationTimeout)) + } + if complete, err := p.add(stream); err != nil { + h.log.Error(err, "Port-forward: error processing stream", "Pod", klog.KRef(h.podNamespace, h.podName), "request", requestID, "streamType", streamType) + err := fmt.Errorf("error processing stream for request %s: %w", requestID, err) + p.printError(err.Error()) + } else if complete { + go h.portForward(ctx, p) + } + } + } +} + +// requestID returns the request id for stream. +func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { + requestID := stream.Headers().Get(corev1.PortForwardRequestIDHeader) + if requestID == "" { + h.log.V(4).Info("Port-forward: connection stream received without requestID header", "Pod", klog.KRef(h.podNamespace, h.podName)) + // If we get here, it's because the connection came from an older client + // that isn't generating the request id header + // (https://github.com/kubernetes/kubernetes/blob/843134885e7e0b360eb5441e85b1410a8b1a7a0c/pkg/client/unversioned/portforward/portforward.go#L258-L287) + // + // This is a best-effort attempt at supporting older clients. + // + // When there aren't concurrent new forwarded connections, each connection + // will have a pair of streams (data, error), and the stream IDs will be + // consecutive odd numbers, e.g. 1 and 3 for the first connection. Convert + // the stream ID into a pseudo-request id by taking the stream type and + // using id = stream.Identifier() when the stream type is error, + // and id = stream.Identifier() - 2 when it's data. + // + // NOTE: this only works when there are not concurrent new streams from + // multiple forwarded connections; it's a best-effort attempt at supporting + // old clients that don't generate request ids. If there are concurrent + // new connections, it's possible that 1 connection gets streams whose IDs + // are not consecutive (e.g. 5 and 9 instead of 5 and 7). + streamType := stream.Headers().Get(corev1.StreamType) + switch streamType { + case corev1.StreamTypeError: + requestID = strconv.Itoa(int(stream.Identifier())) + case corev1.StreamTypeData: + requestID = strconv.Itoa(int(stream.Identifier()) - 2) + } + h.log.V(4).Info("Port-forward: connection automatically assigning request ID from stream type and stream ID", "Pod", klog.KRef(h.podNamespace, h.podName), "request", requestID, "streamType", streamType, "stream", stream.Identifier()) + } + return requestID +} + +// getStreamPair returns a httpStreamPair for requestID. This creates a +// new pair if one does not yet exist for the requestID. The returned bool is +// true if the pair was created. 
+func (h *httpStreamHandler) getStreamPair(requestID string) (*httpStreamPair, bool) { + h.streamPairsLock.Lock() + defer h.streamPairsLock.Unlock() + + if p, ok := h.streamPairs[requestID]; ok { + h.log.V(4).Info("Port-forward: connection request found existing stream pair", "Pod", klog.KRef(h.podNamespace, h.podName), "request", requestID) + return p, false + } + + h.log.V(4).Info("Port-forward: connection request creating new stream pair", "Pod", klog.KRef(h.podNamespace, h.podName), "request", requestID) + + p := newPortForwardPair(requestID) + h.streamPairs[requestID] = p + + return p, true +} + +// monitorStreamPair waits for the pair to receive both its error and data +// streams, or for the timeout to expire (whichever happens first), and then +// removes the pair. +func (h *httpStreamHandler) monitorStreamPair(p *httpStreamPair, timeout <-chan time.Time) { + select { + case <-timeout: + err := fmt.Errorf("(conn=%v, request=%s) timed out waiting for streams", h.conn, p.requestID) + h.log.Error(err, "Port-forward: error processing stream", "Pod", klog.KRef(h.podNamespace, h.podName), "request", p.requestID) + p.printError(err.Error()) + case <-p.complete: + h.log.V(4).Info("Port-forward: connection request successfully received error and data streams", "Pod", klog.KRef(h.podNamespace, h.podName), "request", p.requestID) + } + h.removeStreamPair(p.requestID) +} + +// removeStreamPair removes the stream pair identified by requestID from streamPairs. +func (h *httpStreamHandler) removeStreamPair(requestID string) { + h.streamPairsLock.Lock() + defer h.streamPairsLock.Unlock() + + if h.conn != nil { + pair := h.streamPairs[requestID] + h.conn.RemoveStreams(pair.dataStream, pair.errorStream) + } + delete(h.streamPairs, requestID) +} + +// portForward invokes the HTTPStreamHandler's forwarder.PortForward +// function for the given stream pair. +func (h *httpStreamHandler) portForward(ctx context.Context, p *httpStreamPair) { + defer func() { + _ = p.errorStream.Close() + _ = p.dataStream.Close() + }() + + port := p.dataStream.Headers().Get(corev1.PortHeader) + + h.log.Info("Port-forward: connection request invoking forwarder.PortForward", "Pod", klog.KRef(h.podNamespace, h.podName), "request", p.requestID, "port", port) + err := h.forwarder(ctx, h.podName, h.podNamespace, port, p.dataStream) + h.log.V(4).Info("Port-forward: connection request done invoking forwarder.PortForward", "Pod", klog.KRef(h.podNamespace, h.podName), "request", p.requestID, "port", port) + + if err != nil { + err := fmt.Errorf("error forwarding port %s to pod %s/%s: %w", port, h.podNamespace, h.podName, err) + h.log.Error(err, "Port-forward: error processing request", "Pod", klog.KRef(h.podNamespace, h.podName), "request", p.requestID) + fmt.Fprint(p.errorStream, err.Error()) + } +} + +// httpStreamPair represents the error and data streams for a port +// forwarding request. +type httpStreamPair struct { + lock sync.RWMutex + requestID string + dataStream httpstream.Stream + errorStream httpstream.Stream + complete chan struct{} +} + +// newPortForwardPair creates a new httpStreamPair. +func newPortForwardPair(requestID string) *httpStreamPair { + return &httpStreamPair{ + requestID: requestID, + complete: make(chan struct{}), + } +} + +// add adds the stream to the httpStreamPair. If the pair already +// contains a stream for the new stream's type, an error is returned. add +// returns true if both the data and error streams for this pair have been +// received. 
+func (p *httpStreamPair) add(stream httpstream.Stream) (bool, error) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	switch stream.Headers().Get(corev1.StreamType) {
+	case corev1.StreamTypeError:
+		if p.errorStream != nil {
+			return false, fmt.Errorf("error stream already assigned")
+		}
+		p.errorStream = stream
+	case corev1.StreamTypeData:
+		if p.dataStream != nil {
+			return false, fmt.Errorf("data stream already assigned")
+		}
+		p.dataStream = stream
+	}
+
+	complete := p.errorStream != nil && p.dataStream != nil
+	if complete {
+		close(p.complete)
+	}
+	return complete, nil
+}
+
+// printError writes s to p.errorStream if p.errorStream has been set.
+func (p *httpStreamPair) printError(s string) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+	if p.errorStream != nil {
+		fmt.Fprint(p.errorStream, s)
+	}
+}
+
+// HTTPStreamTunnel creates a tunnel between two streams.
+func HTTPStreamTunnel(ctx context.Context, c1, c2 io.ReadWriter) error {
+	buf1 := make([]byte, 32*1024) // TODO: check if we can make smaller buffers
+	buf2 := make([]byte, 32*1024)
+
+	errCh := make(chan error)
+	go func() {
+		_, err := io.CopyBuffer(c2, c1, buf1)
+		errCh <- err
+	}()
+	go func() {
+		_, err := io.CopyBuffer(c1, c2, buf2)
+		errCh <- err
+	}()
+	select {
+	case <-ctx.Done():
+		// Do nothing
+	case err1 := <-errCh:
+		select {
+		case <-ctx.Done():
+			if err1 != nil {
+				return err1
+			}
+			// Do nothing
+		case err2 := <-errCh:
+			if err1 != nil {
+				// TODO: Consider aggregating errors
+				return err1
+			}
+			return err2
+		}
+	}
+	if err := ctx.Err(); err != nil && !errors.Is(err, context.Canceled) {
+		return err
+	}
+	return nil
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/server/api/watch.go b/test/infrastructure/tmp-to-be-deleted/server/api/watch.go
new file mode 100644
index 0000000000..00e2395799
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/api/watch.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/emicklei/go-restful/v3"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/watch"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Event records a lifecycle event for a Kubernetes object.
+type Event struct {
+	Type   watch.EventType `json:"type,omitempty"`
+	Object runtime.Object  `json:"object,omitempty"`
+}
+
+// WatchEventDispatcher dispatches events for a single resourceGroup.
+type WatchEventDispatcher struct {
+	resourceGroup string
+	events        chan *Event
+}
+
+// OnCreate dispatches Create events.
+func (m *WatchEventDispatcher) OnCreate(resourceGroup string, o client.Object) {
+	if resourceGroup != m.resourceGroup {
+		return
+	}
+	m.events <- &Event{
+		Type:   watch.Added,
+		Object: o,
+	}
+}
+
+// OnUpdate dispatches Update events.
+func (m *WatchEventDispatcher) OnUpdate(resourceGroup string, _, o client.Object) {
+	if resourceGroup != m.resourceGroup {
+		return
+	}
+	m.events <- &Event{
+		Type:   watch.Modified,
+		Object: o,
+	}
+}
+
+// OnDelete dispatches Delete events.
+func (m *WatchEventDispatcher) OnDelete(resourceGroup string, o client.Object) {
+	if resourceGroup != m.resourceGroup {
+		return
+	}
+	m.events <- &Event{
+		Type:   watch.Deleted,
+		Object: o,
+	}
+}
+
+// OnGeneric dispatches Generic events.
+func (m *WatchEventDispatcher) OnGeneric(resourceGroup string, o client.Object) {
+	if resourceGroup != m.resourceGroup {
+		return
+	}
+	m.events <- &Event{
+		Type:   "GENERIC",
+		Object: o,
+	}
+}
+
+func (h *apiServerHandler) watchForResource(req *restful.Request, resp *restful.Response, resourceGroup string, gvk schema.GroupVersionKind) (reterr error) {
+	ctx := req.Request.Context()
+	queryTimeout := req.QueryParameter("timeoutSeconds")
+	c := h.manager.GetCache()
+	i, err := c.GetInformerForKind(ctx, gvk)
+	if err != nil {
+		return err
+	}
+	h.log.Info(fmt.Sprintf("Serving Watch for %v", req.Request.URL))
+	// With an unbuffered event channel RemoveEventHandler could block because it requires a lock on the informer.
+	// When Run stops reading from the channel, the informer could block writing to the unbuffered channel, and then RemoveEventHandler would never go through.
+	// A buffer of 1000 is used to avoid deadlocks in clusters with a higher number of Machines/Nodes.
+	events := make(chan *Event, 1000)
+	watcher := &WatchEventDispatcher{
+		resourceGroup: resourceGroup,
+		events:        events,
+	}
+
+	if err := i.AddEventHandler(watcher); err != nil {
+		return err
+	}
+
+	// Defer cleanup which removes the event handler and ensures the channel is empty of events.
+	defer func() {
+		// Doing this to ensure the channel is empty.
+		// This reduces the probability of a deadlock when removing the event handler.
+	L:
+		for {
+			select {
+			case <-events:
+			default:
+				break L
+			}
+		}
+		reterr = i.RemoveEventHandler(watcher)
+		// Note: After we removed the handler, no new events will be written to the events channel.
+	}()
+
+	return watcher.Run(ctx, queryTimeout, resp)
+}
+
+// Run serves a series of encoded events via HTTP with Transfer-Encoding: chunked.
+func (m *WatchEventDispatcher) Run(ctx context.Context, timeout string, w http.ResponseWriter) error {
+	flusher, ok := w.(http.Flusher)
+	if !ok {
+		return errors.New("can't start Watch: can't get http.Flusher")
+	}
+	resp, ok := w.(*restful.Response)
+	if !ok {
+		return errors.New("can't start Watch: can't get restful.Response")
+	}
+	w.Header().Set("Transfer-Encoding", "chunked")
+	w.WriteHeader(http.StatusOK)
+	flusher.Flush()
+
+	timeoutTimer, seconds, err := setTimer(timeout)
+	if err != nil {
+		return errors.Wrapf(err, "can't start Watch: could not set timeout")
+	}
+
+	ctx, cancel := context.WithTimeout(ctx, seconds)
+	defer cancel()
+	defer timeoutTimer.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case <-timeoutTimer.C:
+			return nil
+		case event, ok := <-m.events:
+			if !ok {
+				// End of results.
+				return nil
+			}
+			if err := resp.WriteEntity(event); err != nil {
+				_ = resp.WriteErrorString(http.StatusInternalServerError, err.Error())
+			}
+			if len(m.events) == 0 {
+				flusher.Flush()
+			}
+		}
+	}
+}
+
+// setTimer creates a time.Timer with the passed `timeout` or a default timeout of 120 seconds if `timeout` is empty.
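+// For example, a watch request with timeoutSeconds=30 results in a 30 second timer.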
+func setTimer(timeout string) (*time.Timer, time.Duration, error) { + var defaultTimeout = 120 * time.Second + if timeout == "" { + t := time.NewTimer(defaultTimeout) + return t, defaultTimeout, nil + } + seconds, err := time.ParseDuration(fmt.Sprintf("%ss", timeout)) + if err != nil { + return nil, 0, errors.Wrapf(err, "Could not parse request timeout %s", timeout) + } + t := time.NewTimer(seconds) + return t, seconds, nil +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/certs.go b/test/infrastructure/tmp-to-be-deleted/server/certs.go new file mode 100644 index 0000000000..05c7415570 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/certs.go @@ -0,0 +1,101 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "crypto/rsa" + "crypto/x509" + "net" + + "github.com/pkg/errors" + "sigs.k8s.io/cluster-api/util/certs" +) + +var key *rsa.PrivateKey + +func init() { + // Create a private key only once, since this is a slow operation and it is ok + // to reuse it for all the certificates in a test provider. + var err error + key, err = certs.NewPrivateKey() + if err != nil { + panic(errors.Wrap(err, "unable to create private key").Error()) + } +} + +func newCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey, config *certs.Config) (*x509.Certificate, *rsa.PrivateKey, error) { + cert, err := config.NewSignedCert(key, caCert, caKey) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to create certificate") + } + + return cert, key, nil +} + +// apiServerCertificateConfig returns the config for an API server serving certificate. +func apiServerCertificateConfig(controlPlaneIP string) *certs.Config { + altNames := &certs.AltNames{ + DNSNames: []string{ + // NOTE: DNS names for the kubernetes service are not required (the API + // server will never be accessed via the service); same for the podName + "localhost", + }, + IPs: []net.IP{ + // NOTE: PodIP is not required (it is the same as the control plane IP) + net.IPv4(127, 0, 0, 1), + net.IPv6loopback, + net.ParseIP(controlPlaneIP), + }, + } + + return &certs.Config{ + CommonName: "kube-apiserver", + AltNames: *altNames, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } +} + +// adminClientCertificateConfig returns the config for an admin client certificate +// to be used for access to the API server. +func adminClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "kubernetes-admin", + Organization: []string{"system:masters"}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} + +// etcdServerCertificateConfig returns the config for an etcd member serving certificate. 
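+// The serving certificate is valid for localhost and for the etcd pod's name and IP.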
+func etcdServerCertificateConfig(podName, podIP string) *certs.Config {
+	altNames := certs.AltNames{
+		DNSNames: []string{
+			"localhost",
+			podName,
+		},
+		IPs: []net.IP{
+			net.IPv4(127, 0, 0, 1),
+			net.IPv6loopback,
+			net.ParseIP(podIP),
+		},
+	}
+
+	return &certs.Config{
+		CommonName: podName,
+		AltNames:   altNames,
+		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
+	}
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/server/doc.go b/test/infrastructure/tmp-to-be-deleted/server/doc.go
new file mode 100644
index 0000000000..43fdd44064
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/doc.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package server implements a server that can be used to fake the APIServer and etcd running
+in the workload clusters; the implementation is designed specifically to make Cluster API
+and the Kubeadm Control Plane provider happy during a stress test (it is not complete or
+production ready, and it will never be).
+
+There are many factors shaping the implementation.
+
+  - The server has to be able to serve requests for many workload clusters.
+  - The server has to serve all kinds of requests CAPI core controllers and the kubeadm
+    control plane controller are sending to workload clusters.
+  - Among those requests there are also two port-forward requests, one targeting the
+    kube-apiserver pods, the other the etcd pods generated by kubeadm. The server has to
+    be able to act as a target for those requests too (it will port forward to itself).
+  - The server needs to comply with the CAPI contract, e.g. the control plane endpoint is in the
+    form of host:port; the port is allocated first, but the server starts answering later, when
+    the first CP instance comes up, etc.
+
+The implementation is inspired by https://fideloper.com/golang-proxy-multiple-listeners
+(kudos to the author!), and it consists of a server that has support for multiplexing requests for
+many workload clusters, each one with its own host:port listener, to a single handler/backend
+implementation.
+*/
+package server
diff --git a/test/infrastructure/tmp-to-be-deleted/server/etcd/doc.go b/test/infrastructure/tmp-to-be-deleted/server/etcd/doc.go
new file mode 100644
index 0000000000..3dbb644019
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/etcd/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package etcd implements a fake etcd server, designed specifically to make Cluster API
+and the Kubeadm Control Plane provider happy during a stress test (it is not complete or
+production ready, and it will never be).
+*/
+package etcd
diff --git a/test/infrastructure/tmp-to-be-deleted/server/etcd/handler.go b/test/infrastructure/tmp-to-be-deleted/server/etcd/handler.go
new file mode 100644
index 0000000000..067e63a596
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/etcd/handler.go
@@ -0,0 +1,358 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package etcd
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	cloudv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/api/v1alpha1"
+	inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime"
+)
+
+// ResourceGroupResolver defines a func that can identify which workloadCluster/resourceGroup a
+// request targets.
+type ResourceGroupResolver func(host string) (string, error)
+
+// NewEtcdServerHandler returns an http.Handler for fake etcd members.
+func NewEtcdServerHandler(manager inmemoryruntime.Manager, log logr.Logger, resolver ResourceGroupResolver) http.Handler {
+	svr := grpc.NewServer()
+
+	baseSvr := &baseServer{
+		manager:               manager,
+		log:                   log,
+		resourceGroupResolver: resolver,
+	}
+
+	clusterServerSrv := &clusterServerServer{
+		baseServer: baseSvr,
+	}
+	pb.RegisterClusterServer(svr, clusterServerSrv)
+
+	maintenanceSrv := &maintenanceServer{
+		baseServer: baseSvr,
+	}
+	pb.RegisterMaintenanceServer(svr, maintenanceSrv)
+
+	return svr
+}
+
+// maintenanceServer implements the MaintenanceServer grpc server.
+type maintenanceServer struct { + *baseServer +} + +func (m *maintenanceServer) Alarm(ctx context.Context, _ *pb.AlarmRequest) (*pb.AlarmResponse, error) { + var resourceGroup string + start := time.Now() + defer func() { + requestLatency.WithLabelValues("Alarm", resourceGroup).Observe(time.Since(start).Seconds()) + }() + + var etcdMember string + var err error + resourceGroup, etcdMember, err = m.getResourceGroupAndMember(ctx) + if err != nil { + return nil, err + } + + m.log.V(4).Info("Etcd: Alarm", "resourceGroup", resourceGroup, "etcdMember", etcdMember) + + return &pb.AlarmResponse{}, nil +} + +func (m *maintenanceServer) Status(ctx context.Context, _ *pb.StatusRequest) (*pb.StatusResponse, error) { + var resourceGroup string + start := time.Now() + defer func() { + requestLatency.WithLabelValues("Status", resourceGroup).Observe(time.Since(start).Seconds()) + }() + + var etcdMember string + var err error + resourceGroup, etcdMember, err = m.getResourceGroupAndMember(ctx) + if err != nil { + return nil, err + } + inmemoryClient := m.manager.GetResourceGroup(resourceGroup).GetClient() + + m.log.V(4).Info("Etcd: Status", "resourceGroup", resourceGroup, "etcdMember", etcdMember) + _, statusResponse, err := m.inspectEtcd(ctx, inmemoryClient, etcdMember) + if err != nil { + return nil, err + } + + return statusResponse, nil +} + +func (m *maintenanceServer) Defragment(_ context.Context, _ *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { + return nil, fmt.Errorf("not implemented: Defragment") +} + +func (m *maintenanceServer) Hash(_ context.Context, _ *pb.HashRequest) (*pb.HashResponse, error) { + return nil, fmt.Errorf("not implemented: Hash") +} + +func (m *maintenanceServer) HashKV(_ context.Context, _ *pb.HashKVRequest) (*pb.HashKVResponse, error) { + return nil, fmt.Errorf("not implemented: HashKV") +} + +func (m *maintenanceServer) Snapshot(_ *pb.SnapshotRequest, _ pb.Maintenance_SnapshotServer) error { + return fmt.Errorf("not implemented: Snapshot") +} + +func (m *maintenanceServer) MoveLeader(ctx context.Context, req *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + var resourceGroup string + start := time.Now() + defer func() { + requestLatency.WithLabelValues("MoveLeader", resourceGroup).Observe(time.Since(start).Seconds()) + }() + + out := new(pb.MoveLeaderResponse) + var err error + resourceGroup, _, err = m.getResourceGroupAndMember(ctx) + if err != nil { + return nil, err + } + etcdPods := &corev1.PodList{} + inmemoryClient := m.manager.GetResourceGroup(resourceGroup).GetClient() + if err := inmemoryClient.List(ctx, etcdPods, + client.InNamespace(metav1.NamespaceSystem), + client.MatchingLabels{ + "component": "etcd", + "tier": "control-plane"}, + ); err != nil { + return nil, errors.Wrap(err, "failed to list etcd members") + } + + if len(etcdPods.Items) == 0 { + return nil, errors.New("failed to list etcd members: no etcd pods found") + } + + for i := range etcdPods.Items { + pod := &etcdPods.Items[i] + for k, v := range pod.GetAnnotations() { + if k == cloudv1.EtcdMemberIDAnnotationName { + target := strconv.FormatInt(int64(req.TargetID), 10) + if v == target { + updatedPod := pod.DeepCopy() + annotations := updatedPod.GetAnnotations() + annotations[cloudv1.EtcdLeaderFromAnnotationName] = time.Now().Format(time.RFC3339) + updatedPod.SetAnnotations(annotations) + err := inmemoryClient.Patch(ctx, updatedPod, client.MergeFrom(pod)) + if err != nil { + return nil, err + } + return out, nil + } + } + } + } + // If we reach this point leadership was not moved. 
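+	// Note: leadership is modeled with the EtcdLeaderFromAnnotationName annotation; the pod
+	// carrying the most recent timestamp is considered the leader by inspectEtcd, so patching
+	// the annotation above is all that "moving" leadership requires in this fake.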
+ return nil, errors.Errorf("etcd member with ID %d did not become the leader: expected etcd Pod not found", req.TargetID) +} + +func (m *maintenanceServer) Downgrade(_ context.Context, _ *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { + return nil, fmt.Errorf("not implemented: Downgrade") +} + +// clusterServerServer implements the ClusterServer grpc server. +type clusterServerServer struct { + *baseServer +} + +func (c *clusterServerServer) MemberAdd(_ context.Context, _ *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { + return nil, fmt.Errorf("not implemented: MemberAdd") +} + +func (c *clusterServerServer) MemberRemove(ctx context.Context, req *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { + var resourceGroup string + start := time.Now() + defer func() { + requestLatency.WithLabelValues("MemberRemove", resourceGroup).Observe(time.Since(start).Seconds()) + }() + + out := new(pb.MemberRemoveResponse) + var err error + resourceGroup, _, err = c.getResourceGroupAndMember(ctx) + if err != nil { + return nil, err + } + inmemoryClient := c.manager.GetResourceGroup(resourceGroup).GetClient() + + etcdPods := &corev1.PodList{} + + if err := inmemoryClient.List(ctx, etcdPods, + client.InNamespace(metav1.NamespaceSystem), + client.MatchingLabels{ + "component": "etcd", + "tier": "control-plane"}, + ); err != nil { + return nil, errors.Wrap(err, "failed to list etcd members") + } + + for i := range etcdPods.Items { + pod := etcdPods.Items[i] + memberID := pod.Annotations[cloudv1.EtcdMemberIDAnnotationName] + if memberID != fmt.Sprintf("%d", req.ID) { + continue + } + updatedPod := pod.DeepCopy() + updatedPod.Annotations[cloudv1.EtcdMemberRemoved] = "" + if err := inmemoryClient.Patch(ctx, updatedPod, client.MergeFrom(&pod)); err != nil { + return nil, err + } + return out, nil + } + return nil, errors.Errorf("no etcd member with id %d found", req.ID) +} + +func (c *clusterServerServer) MemberUpdate(_ context.Context, _ *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { + return nil, fmt.Errorf("not implemented: MemberUpdate") +} + +func (c *clusterServerServer) MemberList(ctx context.Context, _ *pb.MemberListRequest) (*pb.MemberListResponse, error) { + var resourceGroup string + start := time.Now() + defer func() { + requestLatency.WithLabelValues("MemberList", resourceGroup).Observe(time.Since(start).Seconds()) + }() + + var etcdMember string + var err error + resourceGroup, etcdMember, err = c.getResourceGroupAndMember(ctx) + if err != nil { + return nil, err + } + inmemoryClient := c.manager.GetResourceGroup(resourceGroup).GetClient() + + c.log.V(4).Info("Etcd: MemberList", "resourceGroup", resourceGroup, "etcdMember", etcdMember) + memberList, _, err := c.inspectEtcd(ctx, inmemoryClient, etcdMember) + if err != nil { + return nil, err + } + + return memberList, nil +} + +func (c *clusterServerServer) MemberPromote(_ context.Context, _ *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) { + return nil, fmt.Errorf("not implemented: MemberPromote") +} + +type baseServer struct { + manager inmemoryruntime.Manager + log logr.Logger + resourceGroupResolver ResourceGroupResolver +} + +func (b *baseServer) getResourceGroupAndMember(ctx context.Context) (resourceGroup string, etcdMember string, err error) { + localAddr := ctx.Value(http.LocalAddrContextKey) + resourceGroup, err = b.resourceGroupResolver(fmt.Sprintf("%s", localAddr)) + if err != nil { + return "", "", err + } + + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", "", 
errors.Errorf("failed to get metadata when processing request to etcd in resourceGroup %s", resourceGroup) + } + // Calculate the etcd member name by trimming the "etcd-" prefix from ":authority" metadata. + etcdMember = strings.TrimPrefix(strings.Join(md.Get(":authority"), ","), "etcd-") + return +} + +func (b *baseServer) inspectEtcd(ctx context.Context, inmemoryClient inmemoryruntime.Client, etcdMember string) (*pb.MemberListResponse, *pb.StatusResponse, error) { + etcdPods := &corev1.PodList{} + if err := inmemoryClient.List(ctx, etcdPods, + client.InNamespace(metav1.NamespaceSystem), + client.MatchingLabels{ + "component": "etcd", + "tier": "control-plane"}, + ); err != nil { + return nil, nil, errors.Wrap(err, "failed to list etcd members") + } + + memberList := &pb.MemberListResponse{} + statusResponse := &pb.StatusResponse{} + var clusterID int + var leaderID int + var leaderFrom time.Time + for _, pod := range etcdPods.Items { + if _, ok := pod.Annotations[cloudv1.EtcdMemberRemoved]; ok { + if pod.Name == fmt.Sprintf("%s%s", "etcd-", etcdMember) { + return nil, nil, errors.New("inspect called on etcd which has been removed") + } + continue + } + if clusterID == 0 { + var err error + clusterID, err = strconv.Atoi(pod.Annotations[cloudv1.EtcdClusterIDAnnotationName]) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed read cluster ID annotation from etcd member with name %s", pod.Name) + } + } else if pod.Annotations[cloudv1.EtcdClusterIDAnnotationName] != fmt.Sprintf("%d", clusterID) { + return nil, nil, errors.New("invalid etcd cluster, members have different cluster ID") + } + + memberID, err := strconv.Atoi(pod.Annotations[cloudv1.EtcdMemberIDAnnotationName]) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed read member ID annotation from etcd member with name %s", pod.Name) + } + + if t, err := time.Parse(time.RFC3339, pod.Annotations[cloudv1.EtcdLeaderFromAnnotationName]); err == nil { + if t.After(leaderFrom) { + leaderID = memberID + leaderFrom = t + } + } + + if pod.Name == etcdMember { + memberList.Header = &pb.ResponseHeader{ + ClusterId: uint64(clusterID), + MemberId: uint64(memberID), + } + statusResponse.Header = memberList.Header + } + memberList.Members = append(memberList.Members, &pb.Member{ + ID: uint64(memberID), + Name: strings.TrimPrefix(pod.Name, "etcd-"), + }) + } + + if leaderID == 0 { + // TODO: consider if and how to automatically recover from this case + // note: this can happen also when adding a new etcd members in the handler, might be it is something we have to take case before deletion... + // for now it should not be an issue because KCP forwards etcd leadership before deletion. + return nil, nil, errors.New("invalid etcd cluster, no leader found") + } + statusResponse.Leader = uint64(leaderID) + + return memberList, statusResponse, nil +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/etcd/handler_test.go b/test/infrastructure/tmp-to-be-deleted/server/etcd/handler_test.go new file mode 100644 index 0000000000..399dcc86f1 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/etcd/handler_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/gomega" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "google.golang.org/grpc/metadata" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/log" + + cloudv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/api/v1alpha1" + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" +) + +func Test_etcd_scalingflow(t *testing.T) { + scheme := runtime.NewScheme() + _ = clientgoscheme.AddToScheme(scheme) + + // During a scale down event - for example during upgrade - KCP will call `MoveLeader` and `MemberRemove` in sequence. + g := NewWithT(t) + ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{":authority": "etcd-1"})) + manager := inmemoryruntime.NewManager(scheme) + resourceGroupResolver := func(host string) (string, error) { return "group1", nil } + c := &clusterServerServer{ + baseServer: &baseServer{ + log: log.FromContext(ctx), + manager: manager, + resourceGroupResolver: resourceGroupResolver, + }, + } + + m := &maintenanceServer{ + baseServer: &baseServer{ + log: log.FromContext(ctx), + manager: manager, + resourceGroupResolver: resourceGroupResolver, + }, + } + c.manager.AddResourceGroup("group1") + inmemoryClient := c.manager.GetResourceGroup("group1").GetClient() + + for i := 1; i <= 3; i++ { + etcdMember := fmt.Sprintf("etcd-%d", i) + etcdPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: etcdMember, + Labels: map[string]string{ + "component": "etcd", + "tier": "control-plane", + }, + Annotations: map[string]string{ + cloudv1.EtcdMemberIDAnnotationName: fmt.Sprintf("%d", i), + cloudv1.EtcdClusterIDAnnotationName: "15", + }, + }, + Spec: corev1.PodSpec{ + NodeName: fmt.Sprintf("etcd-%d", i), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + // Initially set leader to `etcd-1` + if i == 1 { + etcdPod.Annotations[cloudv1.EtcdLeaderFromAnnotationName] = time.Date(2020, 07, 03, 14, 25, 58, 651387237, time.UTC).Format(time.RFC3339) + } + g.Expect(inmemoryClient.Create(ctx, etcdPod)).To(Succeed()) + } + var etcdMemberToRemove uint64 = 2 + var etcdMemberToBeLeader uint64 = 3 + + t.Run("move leader and remove etcd member", func(t *testing.T) { + _, err := m.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: etcdMemberToBeLeader}) + g.Expect(err).NotTo(HaveOccurred()) + + _, err = c.MemberRemove(ctx, &pb.MemberRemoveRequest{ID: etcdMemberToRemove}) + g.Expect(err).NotTo(HaveOccurred()) + + // Expect the inspect call to fail on a member which has been removed. + _, _, err = c.inspectEtcd(ctx, inmemoryClient, fmt.Sprintf("%d", etcdMemberToRemove)) + g.Expect(err).To(HaveOccurred()) + + // inspectEtcd should succeed when calling on a member that has not been removed. 
+		members, status, err := c.inspectEtcd(ctx, inmemoryClient, fmt.Sprintf("%d", etcdMemberToBeLeader))
+		g.Expect(err).ToNot(HaveOccurred())
+
+		g.Expect(status.Leader).To(Equal(etcdMemberToBeLeader))
+		g.Expect(members.GetMembers()).To(HaveLen(2))
+		// Member names are stored without the "etcd-" prefix, so collect them and check for
+		// the bare member ID (comparing []*pb.Member against a string would never match).
+		memberNames := make([]string, 0, len(members.GetMembers()))
+		for _, member := range members.GetMembers() {
+			memberNames = append(memberNames, member.Name)
+		}
+		g.Expect(memberNames).NotTo(ContainElement(fmt.Sprintf("%d", etcdMemberToRemove)))
+	})
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/server/etcd/metrics.go b/test/infrastructure/tmp-to-be-deleted/server/etcd/metrics.go
new file mode 100644
index 0000000000..5a59a0bce6
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/etcd/metrics.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package etcd
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
+)
+
+func init() {
+	// Note: It probably makes sense to check if we can expose the same grpc metrics as etcd itself
+	// via some existing util.
+
+	// Register the metrics at the controller-runtime metrics registry.
+	ctrlmetrics.Registry.MustRegister(requestLatency)
+}
+
+var (
+	requestLatency = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Name: "capim_etcd_request_duration_seconds",
+			Help: "Request latency in seconds.",
+			Buckets: []float64{0.005, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3,
+				4, 5, 6, 8, 10, 15, 20, 30, 45, 60},
+		}, []string{"grpc_method", "cluster_name"},
+	)
+)
diff --git a/test/infrastructure/tmp-to-be-deleted/server/listener.go b/test/infrastructure/tmp-to-be-deleted/server/listener.go
new file mode 100644
index 0000000000..3e061c6af4
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/listener.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package server
+
+import (
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	"sigs.k8s.io/cluster-api/util/certs"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// WorkloadClusterListener represents a listener for a workload cluster.
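+// Each listener owns the serving certificates and the net.Listener for a single workload
+// cluster; instances are created and managed by the WorkloadClustersMux defined in mux.go.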
+type WorkloadClusterListener struct {
+	host string
+	port int
+
+	scheme *runtime.Scheme
+
+	apiServers                  sets.Set[string]
+	apiServerCaCertificate      *x509.Certificate
+	apiServerCaKey              *rsa.PrivateKey
+	apiServerServingCertificate *tls.Certificate
+
+	adminCertificate *x509.Certificate
+	adminKey         *rsa.PrivateKey
+
+	etcdMembers             sets.Set[string]
+	etcdServingCertificates map[string]*tls.Certificate
+
+	listener net.Listener
+}
+
+// Host returns the host of a WorkloadClusterListener.
+func (s *WorkloadClusterListener) Host() string {
+	return s.host
+}
+
+// Port returns the port of a WorkloadClusterListener.
+func (s *WorkloadClusterListener) Port() int {
+	return s.port
+}
+
+// Address returns the address of a WorkloadClusterListener.
+func (s *WorkloadClusterListener) Address() string {
+	return fmt.Sprintf("https://%s", s.HostPort())
+}
+
+// HostPort returns the host port of a WorkloadClusterListener.
+func (s *WorkloadClusterListener) HostPort() string {
+	return net.JoinHostPort(s.host, fmt.Sprintf("%d", s.port))
+}
+
+// RESTConfig returns the rest config for a WorkloadClusterListener.
+func (s *WorkloadClusterListener) RESTConfig() (*rest.Config, error) {
+	kubeConfig := clientcmdapi.Config{
+		Clusters: map[string]*clientcmdapi.Cluster{
+			"in-memory": {
+				Server:                   s.Address(),
+				CertificateAuthorityData: certs.EncodeCertPEM(s.apiServerCaCertificate), // TODO: convert to PEM (store in double format)
+			},
+		},
+		AuthInfos: map[string]*clientcmdapi.AuthInfo{
+			"in-memory": {
+				Username:              "in-memory",
+				ClientCertificateData: certs.EncodeCertPEM(s.adminCertificate), // TODO: convert to PEM
+				ClientKeyData:         certs.EncodePrivateKeyPEM(s.adminKey),   // TODO: convert to PEM
+			},
+		},
+		Contexts: map[string]*clientcmdapi.Context{
+			"in-memory": {
+				Cluster:  "in-memory",
+				AuthInfo: "in-memory",
+			},
+		},
+		CurrentContext: "in-memory",
+	}
+
+	b, err := clientcmd.Write(kubeConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	restConfig, err := clientcmd.RESTConfigFromKubeConfig(b)
+	if err != nil {
+		return nil, err
+	}
+
+	return restConfig, nil
+}
+
+// GetClient returns a client for a WorkloadClusterListener.
+func (s *WorkloadClusterListener) GetClient() (client.WithWatch, error) {
+	restConfig, err := s.RESTConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	httpClient, err := rest.HTTPClientFor(restConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	mapper, err := apiutil.NewDynamicRESTMapper(restConfig, httpClient)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := client.NewWithWatch(restConfig, client.Options{Scheme: s.scheme, Mapper: mapper})
+	if err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/server/mux.go b/test/infrastructure/tmp-to-be-deleted/server/mux.go
new file mode 100644
index 0000000000..9ee0f4fd90
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/mux.go
@@ -0,0 +1,586 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package server
+
+import (
+	"context"
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/h2c"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog/v2"
+	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1"
+	"sigs.k8s.io/cluster-api/util/certs"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime"
+	inmemoryapi "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server/api"
+	inmemoryetcd "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server/etcd"
+)
+
+const (
+	// DefaultDebugPort default debug port of the workload clusters mux.
+	DefaultDebugPort = 19000
+
+	// This range allows for 4k clusters, which is 4 times the goal we have in mind for
+	// the first iteration of stress tests.
+
+	// DefaultMinPort default min port of the workload clusters mux.
+	DefaultMinPort = 20000
+	// DefaultMaxPort default max port of the workload clusters mux.
+	DefaultMaxPort = 24000
+)
+
+// WorkloadClustersMuxOption defines an option for the WorkloadClustersMux creation.
+type WorkloadClustersMuxOption interface {
+	Apply(*WorkloadClustersMuxOptions)
+}
+
+// WorkloadClustersMuxOptions are options for the workload clusters mux.
+type WorkloadClustersMuxOptions struct {
+	MinPort   int
+	MaxPort   int
+	DebugPort int
+}
+
+// ApplyOptions applies WorkloadClustersMuxOption to the current WorkloadClustersMuxOptions.
+func (o *WorkloadClustersMuxOptions) ApplyOptions(opts []WorkloadClustersMuxOption) *WorkloadClustersMuxOptions {
+	for _, opt := range opts {
+		opt.Apply(o)
+	}
+	return o
+}
+
+// CustomPorts allows customizing the ports used by the workload clusters mux.
+type CustomPorts struct {
+	MinPort   int
+	MaxPort   int
+	DebugPort int
+}
+
+// Apply applies this configuration to the given WorkloadClustersMuxOptions.
+func (c CustomPorts) Apply(options *WorkloadClustersMuxOptions) {
+	options.MinPort = c.MinPort
+	options.MaxPort = c.MaxPort
+	options.DebugPort = c.DebugPort
+}
+
+// WorkloadClustersMux implements a server that handles requests for multiple workload clusters.
+// Each workload cluster gets its own listener, serving on a dedicated port, e.g.
+// wkl-cluster-1 >> :20000, wkl-cluster-2 >> :20001, etc.
+// Each listener acts both as the API server and as etcd for its cluster; the
+// WorkloadClustersMux is also responsible for handling certificates for each of the above use cases.
+type WorkloadClustersMux struct {
+	host      string
+	minPort   int // TODO: move port management to a port range type
+	maxPort   int
+	portIndex int
+
+	manager inmemoryruntime.Manager // TODO: figure out if we can have a smaller interface (GetResourceGroup, GetScheme)
+
+	debugServer              http.Server
+	muxServer                http.Server
+	workloadClusterListeners map[string]*WorkloadClusterListener
+	// workloadClusterNameByHost maps from Host to workload cluster name.
+	workloadClusterNameByHost map[string]string
+
+	lock sync.RWMutex
+	log  logr.Logger
+}
+
+// NewWorkloadClustersMux returns a WorkloadClustersMux that handles requests for multiple workload clusters.
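+//
+// A minimal usage sketch (illustrative only: names, ports, and certificates are examples,
+// and error handling is omitted for brevity):
+//
+//	manager := inmemoryruntime.NewManager(scheme)
+//	wcmux, _ := NewWorkloadClustersMux(manager, "127.0.0.1",
+//		CustomPorts{MinPort: DefaultMinPort, MaxPort: DefaultMaxPort, DebugPort: DefaultDebugPort})
+//	listener, _ := wcmux.InitWorkloadClusterListener("workload-cluster-1")
+//	_ = wcmux.AddAPIServer("workload-cluster-1", "kube-apiserver-1", caCert, caKey)
+//	_ = wcmux.AddEtcdMember("workload-cluster-1", "etcd-1", etcdCaCert, etcdCaKey)
+//	c, _ := listener.GetClient() // controller-runtime client pointed at the fake cluster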
+func NewWorkloadClustersMux(manager inmemoryruntime.Manager, host string, opts ...WorkloadClustersMuxOption) (*WorkloadClustersMux, error) {
+	options := WorkloadClustersMuxOptions{
+		MinPort:   DefaultMinPort,
+		MaxPort:   DefaultMaxPort,
+		DebugPort: DefaultDebugPort,
+	}
+	options.ApplyOptions(opts)
+
+	m := &WorkloadClustersMux{
+		host:                      host,
+		minPort:                   options.MinPort,
+		maxPort:                   options.MaxPort,
+		portIndex:                 options.MinPort,
+		manager:                   manager,
+		workloadClusterListeners:  map[string]*WorkloadClusterListener{},
+		workloadClusterNameByHost: map[string]string{},
+		log:                       log.Log,
+	}
+
+	//nolint:gosec // Ignoring the following for now: "G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)"
+	m.muxServer = http.Server{
+		// Use a handler that can serve either API server calls or etcd calls.
+		Handler: m.mixedHandler(),
+		// Use a TLS config that selects certificates for a specific cluster depending on
+		// the request being processed (API server and etcd have different certificates).
+		TLSConfig: &tls.Config{
+			GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
+				return m.getCertificate(info)
+			},
+			MinVersion: tls.VersionTLS12,
+		},
+	}
+
+	//nolint:gosec // Ignoring the following for now: "G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)"
+	m.debugServer = http.Server{
+		Handler: inmemoryapi.NewDebugHandler(manager, m.log, m),
+	}
+	l, err := net.Listen("tcp", net.JoinHostPort(host, fmt.Sprintf("%d", options.DebugPort)))
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to create listener for workload cluster mux")
+	}
+	go func() { _ = m.debugServer.Serve(l) }()
+
+	return m, nil
+}
+
+// mixedHandler returns a handler that can serve either API server calls or etcd calls.
+func (m *WorkloadClustersMux) mixedHandler() http.Handler {
+	// Prepare a function that can identify which workloadCluster/resourceGroup a
+	// request targets.
+	// IMPORTANT: this function assumes that both the listener and the resourceGroup
+	// for a workload cluster have the same name.
+	resourceGroupResolver := func(host string) (string, error) {
+		m.lock.RLock()
+		defer m.lock.RUnlock()
+		wclName, ok := m.workloadClusterNameByHost[host]
+		if !ok {
+			return "", errors.Errorf("failed to get workloadClusterListener for host %s", host)
+		}
+		return wclName, nil
+	}
+
+	// Build the handlers for the API server and etcd.
+	apiHandler := inmemoryapi.NewAPIServerHandler(m.manager, m.log, resourceGroupResolver)
+	etcdHandler := inmemoryetcd.NewEtcdServerHandler(m.manager, m.log, resourceGroupResolver)
+
+	// Create the mixed handler, combining the two above depending on
+	// the type of request being processed.
+	mixedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("content-type"), "application/grpc") {
+			etcdHandler.ServeHTTP(w, r)
+			return
+		}
+		apiHandler.ServeHTTP(w, r)
+	})
+
+	return h2c.NewHandler(mixedHandler, &http2.Server{})
+}
+
+// getCertificate selects certificates for a specific cluster depending on the request being processed
+// (API server and etcd have different certificates).
+func (m *WorkloadClustersMux) getCertificate(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	// Identify which workloadCluster/resourceGroup a request targets.
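+	// Because each workload cluster serves on its own host:port, the local address of the
+	// connection uniquely identifies the listener and thereby the target cluster.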
+	hostPort := info.Conn.LocalAddr().String()
+	wclName, ok := m.workloadClusterNameByHost[hostPort]
+	if !ok {
+		err := errors.Errorf("failed to get listener name for workload cluster serving on %s", hostPort)
+		m.log.Error(err, "Error resolving certificates")
+		return nil, err
+	}
+
+	// Gets the listener config for the target workloadCluster.
+	wcl, ok := m.workloadClusterListeners[wclName]
+	if !ok {
+		err := errors.Errorf("failed to get listener with name %s for workload cluster serving on %s", wclName, hostPort)
+		m.log.Error(err, "Error resolving certificates")
+		return nil, err
+	}
+
+	// If the request targets a specific etcd member, use the corresponding server certificates.
+	// NOTE: the port forward call to etcd sets the server name to the name of the targeted etcd pod,
+	// which is also the name of the corresponding etcd member.
+	if wcl.etcdMembers.Has(info.ServerName) {
+		m.log.V(4).Info("Using etcd serving certificate", "listenerName", wclName, "host", hostPort, "etcdPod", info.ServerName)
+		return wcl.etcdServingCertificates[info.ServerName], nil
+	}
+
+	// Otherwise we assume the request targets the API server.
+	m.log.V(4).Info("Using API server serving certificate", "listenerName", wclName, "host", hostPort)
+	return wcl.apiServerServingCertificate, nil
+}
+
+// HotRestart tries to set up the mux according to an existing set of InMemoryClusters.
+// NOTE: This is done on a best-effort basis in order to make iterative development workflows easier.
+func (m *WorkloadClustersMux) HotRestart(clusters *infrav1.InMemoryClusterList) error {
+	if len(clusters.Items) == 0 {
+		return nil
+	}
+
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	if len(m.workloadClusterListeners) > 0 {
+		return errors.New("WorkloadClustersMux cannot be hot restarted when there are already initialized listeners")
+	}
+
+	ports := sets.Set[int]{}
+	maxPort := m.minPort - 1
+	for _, c := range clusters.Items {
+		if c.Spec.ControlPlaneEndpoint.Host == "" {
+			continue
+		}
+
+		if c.Spec.ControlPlaneEndpoint.Host != m.host {
+			return errors.Errorf("unable to restart the WorkloadClustersMux, the host address has changed from %s to %s", c.Spec.ControlPlaneEndpoint.Host, m.host)
+		}
+
+		if ports.Has(c.Spec.ControlPlaneEndpoint.Port) {
+			return errors.Errorf("unable to restart the WorkloadClustersMux, there are two or more clusters using port %d", c.Spec.ControlPlaneEndpoint.Port)
+		}
+		// Track the port so the duplicate check above can actually fire for later clusters.
+		ports.Insert(c.Spec.ControlPlaneEndpoint.Port)
+
+		resourceGroup, ok := c.Annotations[infrav1.ResourceGroupAnnotationName]
+		if !ok {
+			return errors.Errorf("unable to restart the WorkloadClustersMux, cluster %s doesn't have the %s annotation", klog.KRef(c.Namespace, c.Name), infrav1.ResourceGroupAnnotationName)
+		}
+
+		m.initWorkloadClusterListenerWithPortLocked(resourceGroup, c.Spec.ControlPlaneEndpoint.Port)
+
+		if maxPort < c.Spec.ControlPlaneEndpoint.Port {
+			maxPort = c.Spec.ControlPlaneEndpoint.Port
+		}
+	}
+
+	m.portIndex = maxPort + 1
+	return nil
+}
+
+// InitWorkloadClusterListener initializes a WorkloadClusterListener by reserving a port for it.
+// Note: The listener will be started when the first API server is added.
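+// Calling it again for the same name is safe: the existing listener is returned and no
+// additional port is reserved.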
+func (m *WorkloadClustersMux) InitWorkloadClusterListener(wclName string) (*WorkloadClusterListener, error) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	if wcl, ok := m.workloadClusterListeners[wclName]; ok {
+		return wcl, nil
+	}
+
+	port, err := m.getFreePortLocked()
+	if err != nil {
+		return nil, err
+	}
+
+	wcl := m.initWorkloadClusterListenerWithPortLocked(wclName, port)
+
+	return wcl, nil
+}
+
+// initWorkloadClusterListenerWithPortLocked initializes a workload cluster listener.
+// Note: m.lock must be locked before calling this method.
+func (m *WorkloadClustersMux) initWorkloadClusterListenerWithPortLocked(wclName string, port int) *WorkloadClusterListener {
+	wcl := &WorkloadClusterListener{
+		scheme:                  m.manager.GetScheme(),
+		host:                    m.host,
+		port:                    port,
+		apiServers:              sets.New[string](),
+		etcdMembers:             sets.New[string](),
+		etcdServingCertificates: map[string]*tls.Certificate{},
+	}
+	m.workloadClusterListeners[wclName] = wcl
+	m.workloadClusterNameByHost[wcl.HostPort()] = wclName
+
+	m.log.Info("Workload cluster listener created", "listenerName", wclName, "address", wcl.Address())
+	return wcl
+}
+
+// AddAPIServer mimics adding an API server instance behind the WorkloadClusterListener.
+// When the first API server instance is added the serving certificates and the admin certificate
+// for tests are generated, and the listener is started.
+func (m *WorkloadClustersMux) AddAPIServer(wclName, podName string, caCert *x509.Certificate, caKey *rsa.PrivateKey) error {
+	// Start the server.
+	// Note: It is important that we unlock once the server is started, because otherwise the server
+	// wouldn't work yet: GetCertificate (which is required for the TLS handshake) also requires the lock.
+	var startServerErr error
+	var wcl *WorkloadClusterListener
+	err := func() error {
+		m.lock.Lock()
+		defer m.lock.Unlock()
+
+		var ok bool
+		wcl, ok = m.workloadClusterListeners[wclName]
+		if !ok {
+			return errors.Errorf("workloadClusterListener with name %s must be initialized before adding an API server", wclName)
+		}
+		wcl.apiServers.Insert(podName)
+		m.log.Info("APIServer instance added to workloadClusterListener", "listenerName", wclName, "address", wcl.Address(), "podName", podName)
+
+		// TODO: check if cert/key are already set, they should match
+		wcl.apiServerCaCertificate = caCert
+		wcl.apiServerCaKey = caKey
+
+		// Generate serving certificates for the API server instance.
+		// NOTE: There is only one server certificate for all API server instances (kubeadm
+		// instead creates one for each API server pod). We don't need this because we are
+		// accessing all API servers via the same endpoint.
+		if wcl.apiServerServingCertificate == nil {
+			config := apiServerCertificateConfig(wcl.host)
+			cert, key, err := newCertAndKey(caCert, caKey, config)
+			if err != nil {
+				return errors.Wrapf(err, "failed to create serving certificate for API server %s", podName)
+			}
+
+			certificate, err := tls.X509KeyPair(certs.EncodeCertPEM(cert), certs.EncodePrivateKeyPEM(key))
+			if err != nil {
+				return errors.Wrapf(err, "failed to create X509KeyPair for API server %s", podName)
+			}
+			wcl.apiServerServingCertificate = &certificate
+		}
+
+		// Generate admin certificates to be used for accessing the API server.
+		// NOTE: this is used for tests because CAPI creates its own.
+		if wcl.adminCertificate == nil {
+			config := adminClientCertificateConfig()
+			cert, key, err := newCertAndKey(caCert, caKey, config)
+			if err != nil {
+				return errors.Wrapf(err, "failed to create admin certificate for API server %s", podName)
+			}
+
+			wcl.adminCertificate = cert
+			wcl.adminKey = key
+		}
+
+		// Start the listener for the API server.
+		// NOTE: There is only one listener for all API server instances; the same listener will act
+		// as a port forward target too.
+		if wcl.listener != nil {
+			return nil
+		}
+
+		l, err := net.Listen("tcp", wcl.HostPort())
+		if err != nil {
+			return errors.Wrapf(err, "failed to start WorkloadClusterListener %s, %s", wclName, wcl.HostPort())
+		}
+		wcl.listener = l
+
+		go func() {
+			if startServerErr = m.muxServer.ServeTLS(wcl.listener, "", ""); startServerErr != nil && !errors.Is(startServerErr, http.ErrServerClosed) {
+				m.log.Error(startServerErr, "Failed to start WorkloadClusterListener", "listenerName", wclName, "address", wcl.Address())
+			}
+		}()
+		return nil
+	}()
+	if err != nil {
+		return errors.Wrapf(err, "error starting server")
+	}
+
+	// Wait until the server is working.
+	var pollErr error
+	err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Millisecond, 1*time.Second, true, func(ctx context.Context) (done bool, err error) {
+		d := &net.Dialer{Timeout: 50 * time.Millisecond}
+		conn, err := tls.DialWithDialer(d, "tcp", wcl.HostPort(), &tls.Config{
+			InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own port.
+		})
+		if err != nil {
+			pollErr = fmt.Errorf("server is not reachable: %w", err)
+			return false, nil
+		}
+
+		if err := conn.Close(); err != nil {
+			pollErr = fmt.Errorf("server is not reachable: closing connection: %w", err)
+			return false, nil
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		return kerrors.NewAggregate([]error{err, pollErr})
+	}
+
+	if startServerErr != nil {
+		return startServerErr
+	}
+
+	m.log.Info("WorkloadClusterListener successfully started", "listenerName", wclName, "address", wcl.Address())
+	return nil
+}
+
+// DeleteAPIServer removes an API server instance from the WorkloadClusterListener.
+func (m *WorkloadClustersMux) DeleteAPIServer(wclName, podName string) error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	wcl, ok := m.workloadClusterListeners[wclName]
+	if !ok {
+		return errors.Errorf("workloadClusterListener with name %s must be initialized before removing an API server", wclName)
+	}
+	wcl.apiServers.Delete(podName)
+	m.log.Info("APIServer instance removed from the workloadClusterListener", "listenerName", wclName, "address", wcl.Address(), "podName", podName)
+
+	if wcl.apiServers.Len() < 1 && wcl.listener != nil {
+		if err := wcl.listener.Close(); err != nil {
+			return errors.Wrapf(err, "failed to stop WorkloadClusterListener %s, %s", wclName, wcl.HostPort())
+		}
+		wcl.listener = nil
+		m.log.Info("WorkloadClusterListener stopped because there are no API servers left", "listenerName", wclName, "address", wcl.Address())
+	}
+	return nil
+}
+
+// HasAPIServer returns true if the workload cluster already has an API server with podName.
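+// This is meant as a cheap idempotency check, so callers can skip AddAPIServer for pods
+// that are already registered.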
+func (m *WorkloadClustersMux) HasAPIServer(wclName, podName string) bool { + m.lock.RLock() + defer m.lock.RUnlock() + + wcl, ok := m.workloadClusterListeners[wclName] + if !ok { + return false + } + return wcl.apiServers.Has(podName) +} + +// AddEtcdMember mimics adding an etcd Member behind the WorkloadClusterListener; +// every etcd member gets a dedicated serving certificate, so it will be possible to serve port forward requests +// to a specific etcd pod/member. +func (m *WorkloadClustersMux) AddEtcdMember(wclName, podName string, caCert *x509.Certificate, caKey *rsa.PrivateKey) error { + m.lock.Lock() + defer m.lock.Unlock() + + wcl, ok := m.workloadClusterListeners[wclName] + if !ok { + return errors.Errorf("workloadClusterListener with name %s must be initialized before adding an etcd member", wclName) + } + wcl.etcdMembers.Insert(podName) + m.log.Info("Etcd member added to WorkloadClusterListener", "listenerName", wclName, "address", wcl.Address(), "podName", podName) + + // Generate Serving certificates for the etcdMember + if _, ok := wcl.etcdServingCertificates[podName]; !ok { + config := etcdServerCertificateConfig(podName, wcl.host) + cert, key, err := newCertAndKey(caCert, caKey, config) + if err != nil { + return errors.Wrapf(err, "failed to create serving certificate for etcd member %s", podName) + } + + certificate, err := tls.X509KeyPair(certs.EncodeCertPEM(cert), certs.EncodePrivateKeyPEM(key)) + if err != nil { + return errors.Wrapf(err, "failed to create X509KeyPair for etcd member %s", podName) + } + wcl.etcdServingCertificates[podName] = &certificate + } + + return nil +} + +// HasEtcdMember returns true if the workload cluster already has an etcd member with podName. +func (m *WorkloadClustersMux) HasEtcdMember(wclName, podName string) bool { + m.lock.RLock() + defer m.lock.RUnlock() + + wcl, ok := m.workloadClusterListeners[wclName] + if !ok { + return false + } + return wcl.etcdMembers.Has(podName) +} + +// DeleteEtcdMember removes an etcd Member from the WorkloadClusterListener. +func (m *WorkloadClustersMux) DeleteEtcdMember(wclName, podName string) error { + m.lock.Lock() + defer m.lock.Unlock() + + wcl, ok := m.workloadClusterListeners[wclName] + if !ok { + return errors.Errorf("workloadClusterListener with name %s must be initialized before removing an etcd member", wclName) + } + wcl.etcdMembers.Delete(podName) + delete(wcl.etcdServingCertificates, podName) + m.log.Info("Etcd member removed from WorkloadClusterListener", "listenerName", wclName, "address", wcl.Address(), "podName", podName) + + return nil +} + +// ListListeners implements api.DebugInfoProvider. +func (m *WorkloadClustersMux) ListListeners() map[string]string { + m.lock.RLock() + defer m.lock.RUnlock() + + ret := map[string]string{} + for k, l := range m.workloadClusterListeners { + ret[k] = l.Address() + } + return ret +} + +// DeleteWorkloadClusterListener deletes a WorkloadClusterListener. 
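+// Deleting a listener closes the underlying net.Listener, if any, and frees the host:port
+// mapping; deleting a listener that does not exist is a no-op.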
+func (m *WorkloadClustersMux) DeleteWorkloadClusterListener(wclName string) error { + m.lock.Lock() + defer m.lock.Unlock() + + wcl, ok := m.workloadClusterListeners[wclName] + if !ok { + return nil + } + + if wcl.listener != nil { + if err := wcl.listener.Close(); err != nil { + return errors.Wrapf(err, "failed to stop WorkloadClusterListener %s, %s", wclName, wcl.HostPort()) + } + } + + delete(m.workloadClusterListeners, wclName) + delete(m.workloadClusterNameByHost, wcl.HostPort()) + + m.log.Info("Workload cluster listener deleted", "listenerName", wclName, "address", wcl.Address()) + return nil +} + +// Shutdown shuts down the workload cluster mux. +func (m *WorkloadClustersMux) Shutdown(ctx context.Context) error { + m.lock.Lock() + defer m.lock.Unlock() + + if err := m.debugServer.Shutdown(ctx); err != nil { + return errors.Wrap(err, "failed to shutdown the debug server") + } + + // NOTE: this closes all the listeners + if err := m.muxServer.Shutdown(ctx); err != nil { + return errors.Wrap(err, "failed to shutdown the mux server") + } + + return nil +} + +// getFreePortLocked gets a free port. +// Note: m.lock must be locked before calling this method. +func (m *WorkloadClustersMux) getFreePortLocked() (int, error) { + port := m.portIndex + if port > m.maxPort { + return -1, errors.Errorf("no more free ports in the %d-%d range", m.minPort, m.maxPort) + } + + // TODO: check the port is actually free. If not try the next one + + m.portIndex++ + return port, nil +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/mux_test.go b/test/infrastructure/tmp-to-be-deleted/server/mux_test.go new file mode 100644 index 0000000000..da1613eca9 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/mux_test.go @@ -0,0 +1,574 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + "testing" + "time" + + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + clientv3 "go.etcd.io/etcd/client/v3" + "google.golang.org/grpc" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + "sigs.k8s.io/cluster-api/util/certs" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + cloudv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/api/v1alpha1" + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" + inmemoryproxy "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server/proxy" +) + +var ( + ctx = context.Background() + scheme = runtime.NewScheme() +) + +func init() { + _ = metav1.AddMetaToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = rbacv1.AddToScheme(scheme) + + ctrl.SetLogger(klog.Background()) +} + +func TestMux(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + manager := inmemoryruntime.NewManager(scheme) + + wcl := "workload-cluster" + host := "127.0.0.1" + wcmux, err := NewWorkloadClustersMux(manager, host, CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: DefaultMinPort, + MaxPort: DefaultMinPort + 99, + DebugPort: DefaultDebugPort, + }) + g.Expect(err).ToNot(HaveOccurred()) + + listener, err := wcmux.InitWorkloadClusterListener(wcl) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(listener.Host()).To(Equal(host)) + g.Expect(listener.Port()).ToNot(BeZero()) + + caCert, caKey, err := newCertificateAuthority() + g.Expect(err).ToNot(HaveOccurred()) + + etcdCert, etcdKey, err := newCertificateAuthority() + g.Expect(err).ToNot(HaveOccurred()) + + apiServerPod1 := "apiserver1" + err = wcmux.AddAPIServer(wcl, apiServerPod1, caCert, caKey) + g.Expect(err).ToNot(HaveOccurred()) + + etcdPodMember1 := "etcd1" + err = wcmux.AddEtcdMember(wcl, etcdPodMember1, etcdCert, etcdKey) + g.Expect(err).ToNot(HaveOccurred()) + + apiServerPod2 := "apiserver2" + err = wcmux.AddAPIServer(wcl, apiServerPod2, caCert, caKey) + g.Expect(err).ToNot(HaveOccurred()) + + etcdPodMember2 := "etcd2" + err = wcmux.AddEtcdMember(wcl, etcdPodMember2, etcdCert, etcdKey) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.DeleteAPIServer(wcl, apiServerPod2) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.DeleteEtcdMember(wcl, etcdPodMember2) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.DeleteAPIServer(wcl, apiServerPod1) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.DeleteEtcdMember(wcl, etcdPodMember1) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.DeleteWorkloadClusterListener(wcl) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.Shutdown(ctx) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestAPI_corev1_CRUD(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + wcmux, c := setupWorkloadClusterListener(g, CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: DefaultMinPort + 100, + MaxPort: DefaultMinPort + 199, + DebugPort: DefaultDebugPort + 1, + }) + + // create + + n := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + } + err := c.Create(ctx, n) + g.Expect(err).ToNot(HaveOccurred()) + + // list + + nl := &corev1.NodeList{} + err = c.List(ctx, nl) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(nl.Items).To(HaveLen(1)) + 
g.Expect(nl.Items[0].Name).To(Equal("foo")) + + // list with nodeName selector on pod + g.Expect(c.Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: metav1.NamespaceDefault}, + Spec: corev1.PodSpec{NodeName: n.Name}, + })).To(Succeed()) + g.Expect(c.Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "notSelectedPod", Namespace: metav1.NamespaceDefault}, + })).To(Succeed()) + + pl := &corev1.PodList{} + nodeNameSelector := &client.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": n.Name}), + } + g.Expect(c.List(ctx, pl, nodeNameSelector)).To(Succeed()) + g.Expect(pl.Items).To(HaveLen(1)) + g.Expect(pl.Items[0].Name).To(Equal("bar")) + + // get + + n = &corev1.Node{} + err = c.Get(ctx, client.ObjectKey{Name: "foo"}, n) + g.Expect(err).ToNot(HaveOccurred()) + + // patch + + n2 := n.DeepCopy() + n2.Annotations = map[string]string{"foo": "bar"} + err = c.Patch(ctx, n2, client.MergeFrom(n)) + g.Expect(err).ToNot(HaveOccurred()) + + n3 := n2.DeepCopy() + taints := []corev1.Taint{{Key: "foo"}} + + n3.Spec.Taints = taints + err = c.Patch(ctx, n3, client.StrategicMergeFrom(n2)) + g.Expect(err).ToNot(HaveOccurred()) + + node := &corev1.Node{} + g.Expect(c.Get(ctx, client.ObjectKeyFromObject(n3), node)).To(Succeed()) + g.Expect(node.Spec.Taints).To(BeComparableTo(taints)) + + // delete + + err = c.Delete(ctx, n) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.Shutdown(ctx) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestAPI_rbacv1_CRUD(t *testing.T) { + t.Parallel() + g := NewWithT(t) + + wcmux, c := setupWorkloadClusterListener(g, CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: DefaultMinPort + 200, + MaxPort: DefaultMinPort + 299, + DebugPort: DefaultDebugPort + 2, + }) + + // create + + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + } + + err := c.Create(ctx, cr) + g.Expect(err).ToNot(HaveOccurred()) + + // list + + crl := &rbacv1.ClusterRoleList{} + err = c.List(ctx, crl) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(crl.Items).To(HaveLen(1)) + g.Expect(crl.Items[0].Name).To(Equal("foo")) + + // get + + err = c.Get(ctx, client.ObjectKey{Name: "foo"}, cr) + g.Expect(err).ToNot(HaveOccurred()) + + // patch + + cr2 := cr.DeepCopy() + cr2.Annotations = map[string]string{"foo": "bar"} + err = c.Patch(ctx, cr2, client.MergeFrom(cr)) + g.Expect(err).ToNot(HaveOccurred()) + + // delete + + err = c.Delete(ctx, cr) + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.Shutdown(ctx) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestAPI_PortForward(t *testing.T) { + t.Parallel() + g := NewWithT(t) + manager := inmemoryruntime.NewManager(scheme) + + // TODO: deduplicate this setup code with the test above + host := "127.0.0.1" + wcmux, err := NewWorkloadClustersMux(manager, host, CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: DefaultMinPort + 300, + MaxPort: DefaultMinPort + 399, + DebugPort: DefaultDebugPort + 3, + }) + g.Expect(err).ToNot(HaveOccurred()) + + // InfraCluster controller >> when "creating the load balancer" + wcl1 := "workload-cluster1" + listener, err := wcmux.InitWorkloadClusterListener(wcl1) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(listener.Host()).To(Equal(host)) + g.Expect(listener.Port()).ToNot(BeZero()) + + caCert, caKey, err := newCertificateAuthority() + g.Expect(err).ToNot(HaveOccurred()) + + // InfraMachine controller >> 
when "creating the API Server pod" + apiServerPod1 := "kube-apiserver-1" + err = wcmux.AddAPIServer(wcl1, apiServerPod1, caCert, caKey) + g.Expect(err).ToNot(HaveOccurred()) + + etcdCert, etcdKey, err := newCertificateAuthority() + g.Expect(err).ToNot(HaveOccurred()) + + // InfraMachine controller >> when "creating the Etcd member pod" + etcdPodMember1 := "etcd-1" + err = wcmux.AddEtcdMember(wcl1, etcdPodMember1, etcdCert, etcdKey) + g.Expect(err).ToNot(HaveOccurred()) + + // Setup resource group + manager.AddResourceGroup(wcl1) + + etcdPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: "etcd-1", + Labels: map[string]string{ + "component": "etcd", + "tier": "control-plane", + }, + Annotations: map[string]string{ + cloudv1.EtcdClusterIDAnnotationName: "1", + cloudv1.EtcdMemberIDAnnotationName: "2", + cloudv1.EtcdLeaderFromAnnotationName: time.Now().Format(time.RFC3339), + }, + }, + } + err = manager.GetResourceGroup(wcl1).GetClient().Create(ctx, etcdPod) + g.Expect(err).ToNot(HaveOccurred()) + + // Test API server TLS handshake via port forward. + + restConfig, err := listener.RESTConfig() + g.Expect(err).ToNot(HaveOccurred()) + + p1 := inmemoryproxy.Proxy{ + Kind: "pods", + Namespace: metav1.NamespaceSystem, + KubeConfig: restConfig, + Port: 1234, + } + + dialer1, err := inmemoryproxy.NewDialer(p1) + g.Expect(err).ToNot(HaveOccurred()) + + rawConn, err := dialer1.DialContextWithAddr(ctx, "kube-apiserver-foo") + g.Expect(err).ToNot(HaveOccurred()) + defer rawConn.Close() + + conn := tls.Client(rawConn, &tls.Config{InsecureSkipVerify: true}) //nolint:gosec // Intentionally not verifying the server cert here. + err = conn.HandshakeContext(ctx) + g.Expect(err).ToNot(HaveOccurred()) + defer conn.Close() + + // Test Etcd via port forward + + caPool := x509.NewCertPool() + caPool.AddCert(etcdCert) + + config := apiServerEtcdClientCertificateConfig() + cert, key, err := newCertAndKey(etcdCert, etcdKey, config) + g.Expect(err).ToNot(HaveOccurred()) + + clientCert, err := tls.X509KeyPair(certs.EncodeCertPEM(cert), certs.EncodePrivateKeyPEM(key)) + g.Expect(err).ToNot(HaveOccurred()) + + p2 := inmemoryproxy.Proxy{ + Kind: "pods", + Namespace: metav1.NamespaceSystem, + KubeConfig: restConfig, + Port: 2379, + } + + dialer2, err := inmemoryproxy.NewDialer(p2) + g.Expect(err).ToNot(HaveOccurred()) + + etcdClient1, err := clientv3.New(clientv3.Config{ + Endpoints: []string{etcdPodMember1}, + DialTimeout: 2 * time.Second, + + DialOptions: []grpc.DialOption{ + grpc.WithBlock(), // block until the underlying connection is up + grpc.WithContextDialer(dialer2.DialContextWithAddr), + }, + TLS: &tls.Config{ + RootCAs: caPool, + Certificates: []tls.Certificate{clientCert}, + MinVersion: tls.VersionTLS12, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + + ml, err := etcdClient1.MemberList(ctx) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ml.Members).To(HaveLen(1)) + g.Expect(ml.Members[0].Name).To(Equal("1")) + + err = etcdClient1.Close() + g.Expect(err).ToNot(HaveOccurred()) + + err = wcmux.Shutdown(ctx) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestAPI_corev1_Watch(t *testing.T) { + g := NewWithT(t) + + _, c := setupWorkloadClusterListener(g, CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: DefaultMinPort + 400, + MaxPort: DefaultMinPort + 499, + DebugPort: DefaultDebugPort + 4, + }) + + ctx := context.Background() + + nodeWatcher, err := c.Watch(ctx, &corev1.NodeList{}) + 
g.Expect(err).ToNot(HaveOccurred()) + + podWatcher, err := c.Watch(ctx, &corev1.PodList{}) + g.Expect(err).ToNot(HaveOccurred()) + + expectedEvents := []string{"ADDED/foo", "MODIFIED/foo", "DELETED/foo", "ADDED/bar", "MODIFIED/bar", "DELETED/bar"} + receivedEvents := []string{} + done := make(chan bool) + go func() { + for { + select { + case event := <-nodeWatcher.ResultChan(): + o, ok := event.Object.(client.Object) + if !ok { + return + } + receivedEvents = append(receivedEvents, fmt.Sprintf("%s/%s", event.Type, o.GetName())) + case event := <-podWatcher.ResultChan(): + o, ok := event.Object.(client.Object) + if !ok { + return + } + receivedEvents = append(receivedEvents, fmt.Sprintf("%s/%s", event.Type, o.GetName())) + case <-done: + nodeWatcher.Stop() + } + } + }() + + // Test watcher on Node resources. + + node1 := &corev1.Node{} + node1.SetName("foo") + + // create node + err = c.Create(ctx, node1) + g.Expect(err).ToNot(HaveOccurred()) + + // get node + n := &corev1.Node{} + err = c.Get(ctx, client.ObjectKey{Name: "foo"}, n) + g.Expect(err).ToNot(HaveOccurred()) + + // patch node + nodeWithAnnotations := n.DeepCopy() + nodeWithAnnotations.SetAnnotations(map[string]string{"foo": "bar"}) + err = c.Patch(ctx, nodeWithAnnotations, client.MergeFrom(n)) + g.Expect(err).ToNot(HaveOccurred()) + + // delete node + err = c.Delete(ctx, n) + g.Expect(err).ToNot(HaveOccurred()) + + // Test watcher on Pod resources. + pod1 := &corev1.Pod{} + pod1.SetName("bar") + pod1.SetNamespace("one") + + // create pod + err = c.Create(ctx, pod1) + g.Expect(err).ToNot(HaveOccurred()) + + // patch pod + p := &corev1.Pod{} + err = c.Get(ctx, client.ObjectKey{Name: "bar", Namespace: "one"}, p) + g.Expect(err).ToNot(HaveOccurred()) + podWithAnnotations := p.DeepCopy() + podWithAnnotations.SetAnnotations(map[string]string{"foo": "bar"}) + err = c.Patch(ctx, podWithAnnotations, client.MergeFrom(p)) + g.Expect(err).ToNot(HaveOccurred()) + + // delete pod + err = c.Delete(ctx, p) + g.Expect(err).ToNot(HaveOccurred()) + + // Wait a second to ensure all events have been flushed. + time.Sleep(time.Second) + + // Send a done signal to close the test goroutine. + done <- true + + // Each event should be the same and in the same order. + g.Expect(receivedEvents).To(Equal(expectedEvents)) +} + +func setupWorkloadClusterListener(g Gomega, ports CustomPorts) (*WorkloadClustersMux, client.WithWatch) { + manager := inmemoryruntime.NewManager(scheme) + + host := "127.0.0.1" + wcmux, err := NewWorkloadClustersMux(manager, host, ports) + g.Expect(err).ToNot(HaveOccurred()) + + // InfraCluster controller >> when "creating the load balancer" + wcl1 := "workload-cluster1" + manager.AddResourceGroup(wcl1) + + listener, err := wcmux.InitWorkloadClusterListener(wcl1) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(listener.Host()).To(Equal(host)) + g.Expect(listener.Port()).ToNot(BeZero()) + + caCert, caKey, err := newCertificateAuthority() + g.Expect(err).ToNot(HaveOccurred()) + + // InfraMachine controller >> when "creating the API Server pod" + apiServerPod1 := "kube-apiserver-1" + err = wcmux.AddAPIServer(wcl1, apiServerPod1, caCert, caKey) + g.Expect(err).ToNot(HaveOccurred()) + + etcdCert, etcdKey, err := newCertificateAuthority() + g.Expect(err).ToNot(HaveOccurred()) + + // InfraMachine controller >> when "creating the Etcd member pod" + etcdPodMember1 := "etcd-1" + err = wcmux.AddEtcdMember(wcl1, etcdPodMember1, etcdCert, etcdKey) + g.Expect(err).ToNot(HaveOccurred()) + + // Test API using a controller runtime client to call it. 
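+	// Note: the client below is built from the listener's self-signed kubeconfig (see
+	// WorkloadClusterListener.RESTConfig), so these tests exercise the full TLS path.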
+ c, err := listener.GetClient() + g.Expect(err).ToNot(HaveOccurred()) + + return wcmux, c +} + +// newCertificateAuthority creates new certificate and private key for the certificate authority. +func newCertificateAuthority() (*x509.Certificate, *rsa.PrivateKey, error) { + key, err := certs.NewPrivateKey() + if err != nil { + return nil, nil, err + } + + c, err := newSelfSignedCACert(key) + if err != nil { + return nil, nil, err + } + + return c, key, nil +} + +// newSelfSignedCACert creates a CA certificate. +func newSelfSignedCACert(key *rsa.PrivateKey) (*x509.Certificate, error) { + cfg := certs.Config{ + CommonName: "kubernetes", + } + + now := time.Now().UTC() + + tmpl := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + NotBefore: now.Add(time.Minute * -5), + NotAfter: now.Add(time.Hour * 24 * 365 * 10), // 10 years + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + MaxPathLenZero: true, + BasicConstraintsValid: true, + MaxPathLen: 0, + IsCA: true, + } + + b, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) + if err != nil { + return nil, errors.Wrapf(err, "failed to create self signed CA certificate: %+v", tmpl) + } + + c, err := x509.ParseCertificate(b) + return c, errors.WithStack(err) +} + +func apiServerEtcdClientCertificateConfig() *certs.Config { + return &certs.Config{ + CommonName: "apiserver-etcd-client", + Organization: []string{"system:masters"}, // TODO: check if we can drop + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/proxy/addr.go b/test/infrastructure/tmp-to-be-deleted/server/proxy/addr.go new file mode 100644 index 0000000000..f5aea1c062 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/proxy/addr.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proxy implements kubeadm proxy functionality. +package proxy + +import ( + "fmt" + "net" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/portforward" +) + +const scheme string = "proxy" + +// Addr defines a proxy net/addr format. +type Addr struct { + net.Addr + port string + identifier uint32 +} + +// Network returns a fake network. +func (a Addr) Network() string { + return portforward.PortForwardProtocolV1Name +} + +// String returns encoded information about the connection. +func (a Addr) String() string { + return fmt.Sprintf( + "%s://%d.%s.local:%s", + scheme, + a.identifier, + portforward.PortForwardProtocolV1Name, + a.port, + ) +} + +// NewAddrFromConn creates an Addr from the given connection. 
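+// The resulting Addr renders as e.g. "proxy://7.portforward.k8s.io.local:2379" (an
+// illustrative value: stream identifier 7, the port-forward stream protocol name, and
+// the port taken from the stream headers).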
+func NewAddrFromConn(c *Conn) Addr { + return Addr{ + port: c.stream.Headers().Get(corev1.PortHeader), + identifier: c.stream.Identifier(), + } +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/proxy/conn.go b/test/infrastructure/tmp-to-be-deleted/server/proxy/conn.go new file mode 100644 index 0000000000..e799a7a33f --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/proxy/conn.go @@ -0,0 +1,87 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "net" + "time" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/httpstream" +) + +// Conn is a Kubernetes API server proxied type of net/conn. +type Conn struct { + connection httpstream.Connection + stream httpstream.Stream + readDeadline time.Time + writeDeadline time.Time +} + +// Read from the connection. +func (c *Conn) Read(b []byte) (n int, err error) { + return c.stream.Read(b) +} + +// Close the underlying proxied connection. +func (c *Conn) Close() error { + return kerrors.NewAggregate([]error{c.stream.Close(), c.connection.Close()}) +} + +// Write to the connection. +func (c *Conn) Write(b []byte) (n int, err error) { + return c.stream.Write(b) +} + +// LocalAddr returns a fake address representing the proxied connection. +func (c *Conn) LocalAddr() net.Addr { + return NewAddrFromConn(c) +} + +// RemoteAddr returns a fake address representing the proxied connection. +func (c *Conn) RemoteAddr() net.Addr { + return NewAddrFromConn(c) +} + +// SetDeadline sets the read and write deadlines to the specified interval. +func (c *Conn) SetDeadline(t time.Time) error { + // TODO: Handle deadlines + c.readDeadline = t + c.writeDeadline = t + return nil +} + +// SetWriteDeadline sets the read and write deadlines to the specified interval. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// SetReadDeadline sets the read and write deadlines to the specified interval. +func (c *Conn) SetReadDeadline(t time.Time) error { + c.readDeadline = t + return nil +} + +// NewConn creates a new net/conn interface based on an underlying Kubernetes +// API server proxy connection. +func NewConn(connection httpstream.Connection, stream httpstream.Stream) *Conn { + return &Conn{ + connection: connection, + stream: stream, + } +} diff --git a/test/infrastructure/tmp-to-be-deleted/server/proxy/dial.go b/test/infrastructure/tmp-to-be-deleted/server/proxy/dial.go new file mode 100644 index 0000000000..5527201735 --- /dev/null +++ b/test/infrastructure/tmp-to-be-deleted/server/proxy/dial.go @@ -0,0 +1,158 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/portforward"
+	"k8s.io/client-go/transport/spdy"
+)
+
+const defaultTimeout = 10 * time.Second
+
+// Dialer creates connections using Kubernetes API Server port-forwarding.
+type Dialer struct {
+	proxy          Proxy
+	clientset      *kubernetes.Clientset
+	proxyTransport http.RoundTripper
+	upgrader       spdy.Upgrader
+	timeout        time.Duration
+}
+
+// NewDialer creates a new dialer for a given API server scope.
+func NewDialer(p Proxy, options ...func(*Dialer) error) (*Dialer, error) {
+	if p.Port == 0 {
+		return nil, errors.New("port required")
+	}
+
+	dialer := &Dialer{
+		proxy: p,
+	}
+
+	for _, option := range options {
+		err := option(dialer)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if dialer.timeout == 0 {
+		dialer.timeout = defaultTimeout
+	}
+	p.KubeConfig.Timeout = dialer.timeout
+	clientset, err := kubernetes.NewForConfig(p.KubeConfig)
+	if err != nil {
+		return nil, err
+	}
+	proxyTransport, upgrader, err := spdy.RoundTripperFor(p.KubeConfig)
+	if err != nil {
+		return nil, err
+	}
+	dialer.proxyTransport = proxyTransport
+	dialer.upgrader = upgrader
+	dialer.clientset = clientset
+	return dialer, nil
+}
+
+// DialContextWithAddr is a Go gRPC-compliant dialer function.
+func (d *Dialer) DialContextWithAddr(ctx context.Context, addr string) (net.Conn, error) {
+	return d.DialContext(ctx, scheme, addr)
+}
+
+// DialContext creates proxied port-forwarded connections.
+// ctx is currently unused, but fulfils the type signature used by gRPC.
+func (d *Dialer) DialContext(_ context.Context, _ string, addr string) (net.Conn, error) {
+	req := d.clientset.CoreV1().RESTClient().
+		Post().
+		Resource(d.proxy.Kind).
+		Namespace(d.proxy.Namespace).
+		Name(addr).
+		SubResource("portforward")
+
+	dialer := spdy.NewDialer(d.upgrader, &http.Client{Transport: d.proxyTransport}, "POST", req.URL())
+
+	// Create a new connection from the dialer.
+	//
+	// Warning: Any early return should close this connection, otherwise we're going to leak them.
+	connection, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
+	if err != nil {
+		return nil, errors.Wrap(err, "error upgrading connection")
+	}
+
+	// Create the headers.
+	headers := http.Header{}
+
+	// Set the header port number to match the proxy one.
+	headers.Set(corev1.PortHeader, fmt.Sprintf("%d", d.proxy.Port))
+
+	// We only create a single stream over the connection.
+	headers.Set(corev1.PortForwardRequestIDHeader, "0")
+
+	// Create the error stream.
+	headers.Set(corev1.StreamType, corev1.StreamTypeError)
+	errorStream, err := connection.CreateStream(headers)
+	if err != nil {
+		return nil, kerrors.NewAggregate([]error{
+			err,
+			connection.Close(),
+		})
+	}
+	// Close the error stream right away, we're not writing to it.
+	if err := errorStream.Close(); err != nil {
+		return nil, kerrors.NewAggregate([]error{
+			err,
+			connection.Close(),
+		})
+	}
+
+	// Create the data stream.
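+	//
+	// Per the port-forward protocol, a forwarded connection always consists of
+	// the error stream created above plus a data stream carrying the actual
+	// connection bytes.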
+	//
+	// NOTE: Given that we're reusing the headers,
+	// we need to overwrite the stream type before creating it.
+	headers.Set(corev1.StreamType, corev1.StreamTypeData)
+	dataStream, err := connection.CreateStream(headers)
+	if err != nil {
+		return nil, kerrors.NewAggregate([]error{
+			errors.Wrap(err, "error creating forwarding stream"),
+			connection.Close(),
+		})
+	}
+
+	// Create the net.Conn and return.
+	return NewConn(connection, dataStream), nil
+}
+
+// DialTimeout sets the timeout.
+func DialTimeout(duration time.Duration) func(*Dialer) error {
+	return func(d *Dialer) error {
+		return d.setTimeout(duration)
+	}
+}
+
+func (d *Dialer) setTimeout(duration time.Duration) error {
+	d.timeout = duration
+	return nil
+}
diff --git a/test/infrastructure/tmp-to-be-deleted/server/proxy/doc.go b/test/infrastructure/tmp-to-be-deleted/server/proxy/doc.go
new file mode 100644
index 0000000000..4a2c1696ea
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/proxy/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package proxy is a copy of the sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/proxy package.
+
+It provides utilities for calling a service via a port-forwarded connection, and we are using it
+to call the fake API server's port forward implementation.
+
+TODO: Consider re-using the copied package from KCP.
+*/
+package proxy
diff --git a/test/infrastructure/tmp-to-be-deleted/server/proxy/proxy.go b/test/infrastructure/tmp-to-be-deleted/server/proxy/proxy.go
new file mode 100644
index 0000000000..58d05f2bfa
--- /dev/null
+++ b/test/infrastructure/tmp-to-be-deleted/server/proxy/proxy.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+	"time"
+
+	"k8s.io/client-go/rest"
+)
+
+// Proxy defines the API server port-forwarded proxy.
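+//
+// A minimal usage sketch (hypothetical values; restConfig is a *rest.Config
+// pointing at the cluster hosting the target resource):
+//
+//	dialer, err := NewDialer(Proxy{
+//		Kind:       "pods",
+//		Namespace:  "kube-system",
+//		KubeConfig: restConfig,
+//		Port:       1234,
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	conn, err := dialer.DialContextWithAddr(ctx, "kube-apiserver-foo")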
+type Proxy struct { + + // Kind is the kind of Kubernetes resource + Kind string + + // Namespace is the namespace in which the Kubernetes resource exists + Namespace string + + // ResourceName is the name of the Kubernetes resource + ResourceName string + + // KubeConfig is the config to connect to the API server + KubeConfig *rest.Config + + // KeepAlive specifies how often a keep alive message is sent to hold + // the connection open + KeepAlive *time.Duration + + // Port is the port to be forwarded from the relevant resource + Port int +} diff --git a/test/infrastructure/vcsim/Dockerfile b/test/infrastructure/vcsim/Dockerfile new file mode 100644 index 0000000000..ed964e1e97 --- /dev/null +++ b/test/infrastructure/vcsim/Dockerfile @@ -0,0 +1,80 @@ +# syntax=docker/dockerfile:1.4 + +# Copyright 2024 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the manager binary +# Run this with docker build --build-arg builder_image= +ARG builder_image + +# Build architecture +ARG ARCH + +# Ignore Hadolint rule "Always tag the version of an image explicitly." +# It's an invalid finding since the image is explicitly set in the Makefile. +# https://github.com/hadolint/hadolint/wiki/DL3006 +# hadolint ignore=DL3006 +FROM ${builder_image} as builder +WORKDIR /workspace + +# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy +ARG goproxy=https://proxy.golang.org +ENV GOPROXY=$goproxy + +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum + +# Essentially, change directories into the test go module +WORKDIR /workspace/test +# Copy the Go Modules manifests +COPY test/go.mod go.mod +COPY test/go.sum go.sum + +# Cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN --mount=type=cache,target=/go/pkg/mod \ + go mod download + +# This needs to build with the entire Cluster API context +WORKDIR /workspace +# Copy the sources (which includes the test/infrastructure/inmemory subdirectory) +COPY ./ ./ + +# Essentially, change directories into vcsim +WORKDIR /workspace/test/infrastructure/vcsim + +# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + go build . + +# Build +ARG ARCH +ARG ldflags + +# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \ + go build -trimpath -ldflags "${ldflags} -extldflags '-static'" \ + -o /workspace/manager . + + +FROM gcr.io/distroless/static:nonroot-${ARCH} +WORKDIR / +COPY --from=builder /workspace/manager . 
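+
+# Example invocation (hypothetical values; the Makefile sets these build args):
+#   docker build --build-arg builder_image=docker.io/library/golang:1.21 \
+#     --build-arg ARCH=amd64 -f test/infrastructure/vcsim/Dockerfile .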
+# Use uid of nonroot user (65532) because kubernetes expects numeric user when applying pod security policies +USER 65532 +ENTRYPOINT ["/manager"] diff --git a/test/infrastructure/vcsim/README.md b/test/infrastructure/vcsim/README.md new file mode 100644 index 0000000000..97dbd16e62 --- /dev/null +++ b/test/infrastructure/vcsim/README.md @@ -0,0 +1,4 @@ +# vcsim controller + +vcsim controller provides one or more vcsim instances, as well as the fake API server / etcd running on the +simulated machines. diff --git a/test/infrastructure/vcsim/api/v1alpha1/.import-restrictions b/test/infrastructure/vcsim/api/v1alpha1/.import-restrictions new file mode 100644 index 0000000000..a2e1dfd081 --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/.import-restrictions @@ -0,0 +1,5 @@ +rules: + - selectorRegexp: sigs[.]k8s[.]io/controller-runtime + allowedPrefixes: [] + forbiddenPrefixes: + - "sigs.k8s.io/controller-runtime" diff --git a/test/infrastructure/vcsim/api/v1alpha1/controlplaneendpoint_types.go b/test/infrastructure/vcsim/api/v1alpha1/controlplaneendpoint_types.go new file mode 100644 index 0000000000..46fb7f9f90 --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/controlplaneendpoint_types.go @@ -0,0 +1,68 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ControlPlaneEndpointFinalizer allows ControlPlaneEndpointReconciler to clean up resources associated with ControlPlaneEndpoint before + // removing it from the API server. + ControlPlaneEndpointFinalizer = "control-plane-endpoint.vcsim.infrastructure.cluster.x-k8s.io" +) + +// ControlPlaneEndpointSpec defines the desired state of the ControlPlaneEndpoint. +type ControlPlaneEndpointSpec struct { +} + +// ControlPlaneEndpointStatus defines the observed state of the ControlPlaneEndpoint. +type ControlPlaneEndpointStatus struct { + // The control plane host. + Host string `json:"host,omitempty"` + + // The control plane port. + Port int `json:"port,omitempty"` +} + +// +kubebuilder:resource:path=controlplaneendpoints,scope=Namespaced,categories=cluster-api +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:object:root=true + +// ControlPlaneEndpoint is the schema for a cluster virtual ip. +// IMPORTANT: The name of the ControlPlaneEndpoint should match the name of the cluster. +type ControlPlaneEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ControlPlaneEndpointSpec `json:"spec,omitempty"` + Status ControlPlaneEndpointStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ControlPlaneEndpointList contains a list of ControlPlaneEndpoint. 
+type ControlPlaneEndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ControlPlaneEndpoint `json:"items"` +} + +func init() { + objectTypes = append(objectTypes, &ControlPlaneEndpoint{}, &ControlPlaneEndpointList{}) +} diff --git a/test/infrastructure/vcsim/api/v1alpha1/doc.go b/test/infrastructure/vcsim/api/v1alpha1/doc.go new file mode 100644 index 0000000000..3f5b00b08b --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the vcsim API group +// +kubebuilder:object:generate=true +// +groupName=vcsim.infrastructure.cluster.x-k8s.io +package v1alpha1 diff --git a/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go b/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go new file mode 100644 index 0000000000..3a83b62d6b --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/envvar_types.go @@ -0,0 +1,96 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnvVarSpec defines the desired state of the EnvVar. +type EnvVarSpec struct { + VCenterSimulator string `json:"vCenterSimulator,omitempty"` + Cluster ClusterEnvVarSpec `json:"cluster,omitempty"` +} + +// ClusterEnvVarSpec defines the spec for the EnvVar generator targeting a specific Cluster API cluster. +type ClusterEnvVarSpec struct { + // The name of the Cluster API cluster. + Name string `json:"name"` + + // The Kubernetes version of the Cluster API cluster. + // NOTE: This variable isn't related to the vcsim controller, but we are handling it here + // in order to have a single point of control for all the variables related to a Cluster API template. + // Default: v1.28.0 + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + + // The number of the control plane machines in the Cluster API cluster. + // NOTE: This variable isn't related to the vcsim controller, but we are handling it here + // in order to have a single point of control for all the variables related to a Cluster API template. + // Default: 1 + ControlPlaneMachines *int `json:"controlPlaneMachines,omitempty"` + + // The number of the worker machines in the Cluster API cluster. 
+ // NOTE: This variable isn't related to the vcsim controller, but we are handling it here + // in order to have a single point of control for all the variables related to a Cluster API template. + // Default: 1 + WorkerMachines *int `json:"workerMachines,omitempty"` + + // Datacenter specifies the Datacenter for the Cluster API cluster. + // Default: 0 (DC0) + Datacenter *int `json:"datacenter,omitempty"` + + // Cluster specifies the VCenter Cluster for the Cluster API cluster. + // Default: 0 (C0) + Cluster *int `json:"cluster,omitempty"` + + // Datastore specifies the Datastore for the Cluster API cluster. + // Default: 0 (LocalDS_0) + Datastore *int `json:"datastore,omitempty"` +} + +// EnvVarStatus defines the observed state of the EnvVar. +type EnvVarStatus struct { + // variables to use when creating the Cluster API cluster. + Variables map[string]string `json:"variables,omitempty"` +} + +// +kubebuilder:resource:path=envvars,scope=Namespaced,categories=cluster-api +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:object:root=true + +// EnvVar is the schema for a EnvVar generator. +type EnvVar struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EnvVarSpec `json:"spec,omitempty"` + Status EnvVarStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// EnvVarList contains a list of EnvVar. +type EnvVarList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []EnvVar `json:"items"` +} + +func init() { + objectTypes = append(objectTypes, &EnvVar{}, &EnvVarList{}) +} diff --git a/test/infrastructure/vcsim/api/v1alpha1/groupversion_info.go b/test/infrastructure/vcsim/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000000..1fb5311ecc --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/groupversion_info.go @@ -0,0 +1,51 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // Version is the API version. + Version = "v1alpha1" + + // GroupName is the name of the API group. + GroupName = "vcsim.infrastructure.cluster.x-k8s.io" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + + // schemeBuilder is used to add go types to the GroupVersionKind scheme. + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = schemeBuilder.AddToScheme + + // objectTypes contains all types to be registered to the GroupVersion. + objectTypes = []runtime.Object{} +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, objectTypes...) 
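+	// Also register common metav1 types (e.g. ListOptions, GetOptions) with
+	// this group version.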
+ metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go new file mode 100644 index 0000000000..6b0ca81701 --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/vcsim_types.go @@ -0,0 +1,118 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // VCenterFinalizer allows VCenterReconciler to clean up resources associated with VCenter before + // removing it from the API server. + VCenterFinalizer = "vcenter.vcsim.infrastructure.cluster.x-k8s.io" +) + +// VCenterSimulatorSpec defines the desired state of the VCenterSimulator. +type VCenterSimulatorSpec struct { + Model *VCenterSimulatorModel `json:"model,omitempty"` +} + +// VCenterSimulatorModel defines the model to be used by the VCenterSimulator. +type VCenterSimulatorModel struct { + // VSphereVersion specifies the VSphere version to use + // Default: 7.0.0 (the minimal vCenter version required by CAPV, vcsim default is 6.5) + VSphereVersion *string `json:"vsphereVersion,omitempty"` + + // Datacenter specifies the number of Datacenter entities to create + // Name prefix: DC, vcsim flag: -dc + // Default: 1 + Datacenter *int `json:"datacenter,omitempty"` + + // Cluster specifies the number of ClusterComputeResource entities to create per Datacenter + // Name prefix: C, vcsim flag: -cluster + // Default: 1 + Cluster *int `json:"cluster,omitempty"` + + // ClusterHost specifies the number of HostSystems entities to create within a Cluster + // Name prefix: H, vcsim flag: -host + // Default: 3 + ClusterHost *int `json:"clusterHost,omitempty"` + + // Pool specifies the number of ResourcePool entities to create per Cluster + // Note that every cluster has a root ResourcePool named "Resources", as real vCenter does. + // For example: /DC0/host/DC0_C0/Resources + // The root ResourcePool is named "RP0" within other object names. + // When Model.Pool is set to 1 or higher, this creates child ResourcePools under the root pool. + // Note that this flag is not effective on standalone hosts (ESXi without vCenter). + // For example: /DC0/host/DC0_C0/Resources/DC0_C0_RP1 + // Name prefix: RP, vcsim flag: -pool + // Default: 0 + // TODO: model pool selection for each cluster; for now ResourcePool named "Resources" will be always used + // but ideally we should use RPx as per documentation above. 
+ Pool *int `json:"pool,omitempty"` + + // Datastore specifies the number of Datastore entities to create + // Each Datastore will have temporary local file storage and will be mounted + // on every HostSystem created by the ModelConfig + // Name prefix: LocalDS, vcsim flag: -ds + // Default: 1 + Datastore *int `json:"datastore,omitempty"` + + // TODO: consider if to add options for creating more folders, networks, custom storage policies +} + +// VCenterSimulatorStatus defines the observed state of the VCenterSimulator. +type VCenterSimulatorStatus struct { + // The vcsim server url's host. + Host string `json:"host,omitempty"` + + // The vcsim server username. + Username string `json:"username,omitempty"` + + // The vcsim server password. + Password string `json:"password,omitempty"` + + // The vcsim server thumbprint. + Thumbprint string `json:"thumbprint,omitempty"` +} + +// +kubebuilder:resource:path=vcentersimulators,scope=Namespaced,categories=cluster-api +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:object:root=true + +// VCenterSimulator is the schema for a VCenter simulator server. +type VCenterSimulator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec VCenterSimulatorSpec `json:"spec,omitempty"` + Status VCenterSimulatorStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// VCenterSimulatorList contains a list of VCenterSimulator. +type VCenterSimulatorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VCenterSimulator `json:"items"` +} + +func init() { + objectTypes = append(objectTypes, &VCenterSimulator{}, &VCenterSimulatorList{}) +} diff --git a/test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go b/test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..5399c77584 --- /dev/null +++ b/test/infrastructure/vcsim/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,395 @@ +//go:build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterEnvVarSpec) DeepCopyInto(out *ClusterEnvVarSpec) { + *out = *in + if in.KubernetesVersion != nil { + in, out := &in.KubernetesVersion, &out.KubernetesVersion + *out = new(string) + **out = **in + } + if in.ControlPlaneMachines != nil { + in, out := &in.ControlPlaneMachines, &out.ControlPlaneMachines + *out = new(int) + **out = **in + } + if in.WorkerMachines != nil { + in, out := &in.WorkerMachines, &out.WorkerMachines + *out = new(int) + **out = **in + } + if in.Datacenter != nil { + in, out := &in.Datacenter, &out.Datacenter + *out = new(int) + **out = **in + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(int) + **out = **in + } + if in.Datastore != nil { + in, out := &in.Datastore, &out.Datastore + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterEnvVarSpec. +func (in *ClusterEnvVarSpec) DeepCopy() *ClusterEnvVarSpec { + if in == nil { + return nil + } + out := new(ClusterEnvVarSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneEndpoint) DeepCopyInto(out *ControlPlaneEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpoint. +func (in *ControlPlaneEndpoint) DeepCopy() *ControlPlaneEndpoint { + if in == nil { + return nil + } + out := new(ControlPlaneEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlaneEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneEndpointList) DeepCopyInto(out *ControlPlaneEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControlPlaneEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpointList. +func (in *ControlPlaneEndpointList) DeepCopy() *ControlPlaneEndpointList { + if in == nil { + return nil + } + out := new(ControlPlaneEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlaneEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneEndpointSpec) DeepCopyInto(out *ControlPlaneEndpointSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpointSpec. 
+func (in *ControlPlaneEndpointSpec) DeepCopy() *ControlPlaneEndpointSpec { + if in == nil { + return nil + } + out := new(ControlPlaneEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneEndpointStatus) DeepCopyInto(out *ControlPlaneEndpointStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneEndpointStatus. +func (in *ControlPlaneEndpointStatus) DeepCopy() *ControlPlaneEndpointStatus { + if in == nil { + return nil + } + out := new(ControlPlaneEndpointStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvVar) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarList) DeepCopyInto(out *EnvVarList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarList. +func (in *EnvVarList) DeepCopy() *EnvVarList { + if in == nil { + return nil + } + out := new(EnvVarList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvVarList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarSpec) DeepCopyInto(out *EnvVarSpec) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSpec. +func (in *EnvVarSpec) DeepCopy() *EnvVarSpec { + if in == nil { + return nil + } + out := new(EnvVarSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarStatus) DeepCopyInto(out *EnvVarStatus) { + *out = *in + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarStatus. 
+func (in *EnvVarStatus) DeepCopy() *EnvVarStatus { + if in == nil { + return nil + } + out := new(EnvVarStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenterSimulator) DeepCopyInto(out *VCenterSimulator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenterSimulator. +func (in *VCenterSimulator) DeepCopy() *VCenterSimulator { + if in == nil { + return nil + } + out := new(VCenterSimulator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VCenterSimulator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenterSimulatorList) DeepCopyInto(out *VCenterSimulatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VCenterSimulator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenterSimulatorList. +func (in *VCenterSimulatorList) DeepCopy() *VCenterSimulatorList { + if in == nil { + return nil + } + out := new(VCenterSimulatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VCenterSimulatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenterSimulatorModel) DeepCopyInto(out *VCenterSimulatorModel) { + *out = *in + if in.VSphereVersion != nil { + in, out := &in.VSphereVersion, &out.VSphereVersion + *out = new(string) + **out = **in + } + if in.Datacenter != nil { + in, out := &in.Datacenter, &out.Datacenter + *out = new(int) + **out = **in + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(int) + **out = **in + } + if in.ClusterHost != nil { + in, out := &in.ClusterHost, &out.ClusterHost + *out = new(int) + **out = **in + } + if in.Pool != nil { + in, out := &in.Pool, &out.Pool + *out = new(int) + **out = **in + } + if in.Datastore != nil { + in, out := &in.Datastore, &out.Datastore + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenterSimulatorModel. +func (in *VCenterSimulatorModel) DeepCopy() *VCenterSimulatorModel { + if in == nil { + return nil + } + out := new(VCenterSimulatorModel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VCenterSimulatorSpec) DeepCopyInto(out *VCenterSimulatorSpec) { + *out = *in + if in.Model != nil { + in, out := &in.Model, &out.Model + *out = new(VCenterSimulatorModel) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenterSimulatorSpec. +func (in *VCenterSimulatorSpec) DeepCopy() *VCenterSimulatorSpec { + if in == nil { + return nil + } + out := new(VCenterSimulatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenterSimulatorStatus) DeepCopyInto(out *VCenterSimulatorStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenterSimulatorStatus. +func (in *VCenterSimulatorStatus) DeepCopy() *VCenterSimulatorStatus { + if in == nil { + return nil + } + out := new(VCenterSimulatorStatus) + in.DeepCopyInto(out) + return out +} diff --git a/test/infrastructure/vcsim/config/certmanager/certificate.yaml b/test/infrastructure/vcsim/config/certmanager/certificate.yaml new file mode 100644 index 0000000000..4079986e89 --- /dev/null +++ b/test/infrastructure/vcsim/config/certmanager/certificate.yaml @@ -0,0 +1,24 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/test/infrastructure/vcsim/config/certmanager/kustomization.yaml b/test/infrastructure/vcsim/config/certmanager/kustomization.yaml new file mode 100644 index 0000000000..95f333f3f7 --- /dev/null +++ b/test/infrastructure/vcsim/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: + - certificate.yaml + +configurations: + - kustomizeconfig.yaml diff --git a/test/infrastructure/vcsim/config/certmanager/kustomizeconfig.yaml b/test/infrastructure/vcsim/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000000..c6a6c0f1e0 --- /dev/null +++ b/test/infrastructure/vcsim/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: + - kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: + - kind: Certificate + group: cert-manager.io + path: spec/commonName + - kind: Certificate + group: cert-manager.io + path: spec/dnsNames + - kind: Certificate + group: cert-manager.io + path: spec/secretName diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml new file mode 100644 index 0000000000..f70f17596a --- /dev/null +++ 
b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: controlplaneendpoints.vcsim.infrastructure.cluster.x-k8s.io +spec: + group: vcsim.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: ControlPlaneEndpoint + listKind: ControlPlaneEndpointList + plural: controlplaneendpoints + singular: controlplaneendpoint + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: 'ControlPlaneEndpoint is the schema for a cluster virtual ip. + IMPORTANT: The name of the ControlPlaneEndpoint should match the name of + the cluster.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControlPlaneEndpointSpec defines the desired state of the + ControlPlaneEndpoint. + type: object + status: + description: ControlPlaneEndpointStatus defines the observed state of + the ControlPlaneEndpoint. + properties: + host: + description: The control plane host. + type: string + port: + description: The control plane port. + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml new file mode 100644 index 0000000000..9256ded9c3 --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml @@ -0,0 +1,98 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: envvars.vcsim.infrastructure.cluster.x-k8s.io +spec: + group: vcsim.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: EnvVar + listKind: EnvVarList + plural: envvars + singular: envvar + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: EnvVar is the schema for a EnvVar generator. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EnvVarSpec defines the desired state of the EnvVar. + properties: + cluster: + description: ClusterEnvVarSpec defines the spec for the EnvVar generator + targeting a specific Cluster API cluster. + properties: + cluster: + description: 'Cluster specifies the VCenter Cluster for the Cluster + API cluster. Default: 0 (C0)' + type: integer + controlPlaneMachines: + description: 'The number of the control plane machines in the + Cluster API cluster. NOTE: This variable isn''t related to the + vcsim controller, but we are handling it here in order to have + a single point of control for all the variables related to a + Cluster API template. Default: 1' + type: integer + datacenter: + description: 'Datacenter specifies the Datacenter for the Cluster + API cluster. Default: 0 (DC0)' + type: integer + datastore: + description: 'Datastore specifies the Datastore for the Cluster + API cluster. Default: 0 (LocalDS_0)' + type: integer + kubernetesVersion: + description: 'The Kubernetes version of the Cluster API cluster. + NOTE: This variable isn''t related to the vcsim controller, + but we are handling it here in order to have a single point + of control for all the variables related to a Cluster API template. + Default: v1.28.0' + type: string + name: + description: The name of the Cluster API cluster. + type: string + workerMachines: + description: 'The number of the worker machines in the Cluster + API cluster. NOTE: This variable isn''t related to the vcsim + controller, but we are handling it here in order to have a single + point of control for all the variables related to a Cluster + API template. Default: 1' + type: integer + required: + - name + type: object + vCenterSimulator: + type: string + type: object + status: + description: EnvVarStatus defines the observed state of the EnvVar. + properties: + variables: + additionalProperties: + type: string + description: variables to use when creating the Cluster API cluster. + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml new file mode 100644 index 0000000000..886fef3a46 --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml @@ -0,0 +1,104 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: vcentersimulators.vcsim.infrastructure.cluster.x-k8s.io +spec: + group: vcsim.infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: VCenterSimulator + listKind: VCenterSimulatorList + plural: vcentersimulators + singular: vcentersimulator + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: VCenterSimulator is the schema for a VCenter simulator server. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VCenterSimulatorSpec defines the desired state of the VCenterSimulator. + properties: + model: + description: VCenterSimulatorModel defines the model to be used by + the VCenterSimulator. + properties: + cluster: + description: 'Cluster specifies the number of ClusterComputeResource + entities to create per Datacenter Name prefix: C, vcsim flag: + -cluster Default: 1' + type: integer + clusterHost: + description: 'ClusterHost specifies the number of HostSystems + entities to create within a Cluster Name prefix: H, vcsim flag: + -host Default: 3' + type: integer + datacenter: + description: 'Datacenter specifies the number of Datacenter entities + to create Name prefix: DC, vcsim flag: -dc Default: 1' + type: integer + datastore: + description: 'Datastore specifies the number of Datastore entities + to create Each Datastore will have temporary local file storage + and will be mounted on every HostSystem created by the ModelConfig + Name prefix: LocalDS, vcsim flag: -ds Default: 1' + type: integer + pool: + description: 'Pool specifies the number of ResourcePool entities + to create per Cluster Note that every cluster has a root ResourcePool + named "Resources", as real vCenter does. For example: /DC0/host/DC0_C0/Resources + The root ResourcePool is named "RP0" within other object names. + When Model.Pool is set to 1 or higher, this creates child ResourcePools + under the root pool. Note that this flag is not effective on + standalone hosts (ESXi without vCenter). For example: /DC0/host/DC0_C0/Resources/DC0_C0_RP1 + Name prefix: RP, vcsim flag: -pool Default: 0 TODO: model pool + selection for each cluster; for now ResourcePool named "Resources" + will be always used but ideally we should use RPx as per documentation + above.' + type: integer + vsphereVersion: + description: 'VSphereVersion specifies the VSphere version to + use Default: 7.0.0 (the minimal vCenter version required by + CAPV, vcsim default is 6.5)' + type: string + type: object + type: object + status: + description: VCenterSimulatorStatus defines the observed state of the + VCenterSimulator. + properties: + host: + description: The vcsim server url's host. + type: string + password: + description: The vcsim server password. + type: string + thumbprint: + description: The vcsim server thumbprint. + type: string + username: + description: The vcsim server username. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/infrastructure/vcsim/config/crd/kustomization.yaml b/test/infrastructure/vcsim/config/crd/kustomization.yaml new file mode 100644 index 0000000000..7523b46d0a --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/kustomization.yaml @@ -0,0 +1,26 @@ +commonLabels: + cluster.x-k8s.io/v1beta1: v1alpha1 + +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. 
+# It should be run by config/ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml + - bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml + - bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml + +patchesStrategicMerge: + # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. + # patches here are for enabling the conversion webhook for each CRD + + # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. + # patches here are for enabling the CA injection for each CRD + - patches/cainjection_in_vcentersimulators.yaml + - patches/cainjection_in_controlplaneendpoints.yaml + - patches/cainjection_in_envvars.yaml + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: + - kustomizeconfig.yaml diff --git a/test/infrastructure/vcsim/config/crd/kustomizeconfig.yaml b/test/infrastructure/vcsim/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000000..8e2d8d6b17 --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/kustomizeconfig.yaml @@ -0,0 +1,17 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/test/infrastructure/vcsim/config/crd/patches/cainjection_in_controlplaneendpoints.yaml b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_controlplaneendpoints.yaml new file mode 100644 index 0000000000..ad33cf2e68 --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_controlplaneendpoints.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: controlplaneendpoints.vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/config/crd/patches/cainjection_in_envvars.yaml b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_envvars.yaml new file mode 100644 index 0000000000..081cdd5230 --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_envvars.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
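+# $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) are substituted with the vars
+# defined in config/default/kustomization.yaml.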
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: envvars.vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/config/crd/patches/cainjection_in_vcentersimulators.yaml b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_vcentersimulators.yaml new file mode 100644 index 0000000000..20a6458fa1 --- /dev/null +++ b/test/infrastructure/vcsim/config/crd/patches/cainjection_in_vcentersimulators.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: vcentersimulators.vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/config/default/kustomization.yaml b/test/infrastructure/vcsim/config/default/kustomization.yaml new file mode 100644 index 0000000000..be5c023f2e --- /dev/null +++ b/test/infrastructure/vcsim/config/default/kustomization.yaml @@ -0,0 +1,55 @@ +namespace: capvsim-system + +namePrefix: capvsim- + +commonLabels: + # capvsim is not a provider, but by adding this label + # we can get this installed by Cluster APIs Tiltfile. + cluster.x-k8s.io/provider: "infrastructure-vsphere-simulator" + +resources: + - namespace.yaml + +bases: + - ../crd + - ../rbac + - ../manager + - ../webhook + - ../certmanager + +patchesStrategicMerge: + # Provide customizable hook for make targets. + - manager_image_patch.yaml + - manager_pull_policy.yaml + - manager_webhook_patch.yaml + +vars: + - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace + - name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + - name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace + - name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service + +configurations: + - kustomizeconfig.yaml diff --git a/test/infrastructure/vcsim/config/default/kustomizeconfig.yaml b/test/infrastructure/vcsim/config/default/kustomizeconfig.yaml new file mode 100644 index 0000000000..eb191e64d0 --- /dev/null +++ b/test/infrastructure/vcsim/config/default/kustomizeconfig.yaml @@ -0,0 +1,4 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +varReference: +- kind: Deployment + path: spec/template/spec/volumes/secret/secretName diff --git a/test/infrastructure/vcsim/config/default/manager_image_patch.yaml b/test/infrastructure/vcsim/config/default/manager_image_patch.yaml new file mode 100644 index 0000000000..5d07c089b2 --- /dev/null +++ b/test/infrastructure/vcsim/config/default/manager_image_patch.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vcsim-controller:main + name: manager diff --git a/test/infrastructure/vcsim/config/default/manager_pull_policy.yaml 
b/test/infrastructure/vcsim/config/default/manager_pull_policy.yaml new file mode 100644 index 0000000000..cd7ae12c01 --- /dev/null +++ b/test/infrastructure/vcsim/config/default/manager_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: IfNotPresent diff --git a/test/infrastructure/vcsim/config/default/manager_webhook_patch.yaml b/test/infrastructure/vcsim/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000000..f18fd10f99 --- /dev/null +++ b/test/infrastructure/vcsim/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize + diff --git a/test/infrastructure/vcsim/config/default/namespace.yaml b/test/infrastructure/vcsim/config/default/namespace.yaml new file mode 100644 index 0000000000..8b55c3cd89 --- /dev/null +++ b/test/infrastructure/vcsim/config/default/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system diff --git a/test/infrastructure/vcsim/config/manager/kustomization.yaml b/test/infrastructure/vcsim/config/manager/kustomization.yaml new file mode 100644 index 0000000000..5c5f0b84cb --- /dev/null +++ b/test/infrastructure/vcsim/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/test/infrastructure/vcsim/config/manager/manager.yaml b/test/infrastructure/vcsim/config/manager/manager.yaml new file mode 100644 index 0000000000..fe7bc67cca --- /dev/null +++ b/test/infrastructure/vcsim/config/manager/manager.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - "--leader-elect" + - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" + - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" + image: controller:latest + name: manager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9440 + name: healthz + protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsUser: 65532 + runAsGroup: 65532 + terminationGracePeriodSeconds: 10 + serviceAccountName: manager + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/test/infrastructure/vcsim/config/rbac/kustomization.yaml 
b/test/infrastructure/vcsim/config/rbac/kustomization.yaml new file mode 100644 index 0000000000..6da23f4031 --- /dev/null +++ b/test/infrastructure/vcsim/config/rbac/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- role.yaml +- role_binding.yaml +- service_account.yaml diff --git a/test/infrastructure/vcsim/config/rbac/role.yaml b/test/infrastructure/vcsim/config/rbac/role.yaml new file mode 100644 index 0000000000..d574c2ad2d --- /dev/null +++ b/test/infrastructure/vcsim/config/rbac/role.yaml @@ -0,0 +1,234 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - watch +- apiGroups: + - "" + resources: + - resourcequotas + verbs: + - create + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + verbs: + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - machines + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - vsphereclusteridentities + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - vsphereclusters + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - vspheremachines + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - vspherevms + verbs: + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - create + - get + - list + - watch +- apiGroups: + - topology.tanzu.vmware.com + resources: + - availabilityzones + verbs: + - create + - get + - list + - watch +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - controlplaneendpoints + verbs: + - get + - list + - patch + - watch +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - controlplaneendpoints/status + verbs: + - get + - patch + - update +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - envvars + verbs: + - get + - list + - patch + - watch +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - envvars/status + verbs: + - get + - patch + - update +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - vcentersimulators + verbs: + - get + - list + - patch + - watch +- apiGroups: + - vcsim.infrastructure.cluster.x-k8s.io + resources: + - vcentersimulators/status + verbs: + - get + - patch + - update +- apiGroups: + - vmoperator.vmware.com + resources: + - contentlibraryproviders + verbs: + - create + - get + - list + - watch +- apiGroups: + - vmoperator.vmware.com + resources: + - contentsourcebindings + verbs: + - create + - get + - list + - watch +- apiGroups: + - vmoperator.vmware.com + resources: + - contentsources + verbs: + - create + - get + - list + - watch +- apiGroups: + - vmoperator.vmware.com + resources: + - virtualmachineclassbindings + verbs: + - create + - get + - list + - watch +- apiGroups: + - vmoperator.vmware.com + resources: + - virtualmachineclasses + verbs: + - create + - get + - list + - watch +- apiGroups: + - vmoperator.vmware.com + resources: + - virtualmachineimages + verbs: + - create + - get + - list + - watch +- apiGroups: + - vmoperator.vmware.com + resources: + - 
virtualmachines + verbs: + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - vmware.infrastructure.cluster.x-k8s.io + resources: + - vsphereclusters + verbs: + - get + - list + - watch +- apiGroups: + - vmware.infrastructure.cluster.x-k8s.io + resources: + - vspheremachines + verbs: + - get + - list + - watch diff --git a/test/infrastructure/vcsim/config/rbac/role_binding.yaml b/test/infrastructure/vcsim/config/rbac/role_binding.yaml new file mode 100644 index 0000000000..5a95f66d6f --- /dev/null +++ b/test/infrastructure/vcsim/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: manager + namespace: system diff --git a/test/infrastructure/vcsim/config/rbac/service_account.yaml b/test/infrastructure/vcsim/config/rbac/service_account.yaml new file mode 100644 index 0000000000..77f747b53c --- /dev/null +++ b/test/infrastructure/vcsim/config/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: manager + namespace: system diff --git a/test/infrastructure/vcsim/config/webhook/kustomization.yaml b/test/infrastructure/vcsim/config/webhook/kustomization.yaml new file mode 100644 index 0000000000..66157d5d5f --- /dev/null +++ b/test/infrastructure/vcsim/config/webhook/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/test/infrastructure/vcsim/config/webhook/kustomizeconfig.yaml b/test/infrastructure/vcsim/config/webhook/kustomizeconfig.yaml new file mode 100644 index 0000000000..25e21e3c96 --- /dev/null +++ b/test/infrastructure/vcsim/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching kustomize where to look when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/test/infrastructure/vcsim/config/webhook/service.yaml b/test/infrastructure/vcsim/config/webhook/service.yaml new file mode 100644 index 0000000000..711977f54f --- /dev/null +++ b/test/infrastructure/vcsim/config/webhook/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: webhook-server diff --git a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go new file mode 100644 index 0000000000..0bdeed3e32 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go @@ -0,0 +1,148 @@ +/* +Copyright 2024 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" + inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +type ControlPlaneEndpointReconciler struct { + Client client.Client + + InMemoryManager inmemoryruntime.Manager + APIServerMux *inmemoryserver.WorkloadClustersMux + PodIP string + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string +} + +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=controlplaneendpoints,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=controlplaneendpoints/status,verbs=get;update;patch + +func (r *ControlPlaneEndpointReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + // Fetch the ControlPlaneEndpoint instance + controlPlaneEndpoint := &vcsimv1.ControlPlaneEndpoint{} + if err := r.Client.Get(ctx, req.NamespacedName, controlPlaneEndpoint); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Initialize the patch helper + patchHelper, err := patch.NewHelper(controlPlaneEndpoint, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // Always attempt to Patch the controlPlaneEndpoint object and status after each reconciliation. + defer func() { + if err := patchHelper.Patch(ctx, controlPlaneEndpoint); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + // Handle deleted ControlPlaneEndpoints + if !controlPlaneEndpoint.DeletionTimestamp.IsZero() { + return ctrl.Result{}, r.reconcileDelete(ctx, controlPlaneEndpoint) + } + + // Add finalizer first if not set to avoid the race condition between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp is not set.
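A minimal, generic sketch of the finalizer-first flow this and the other reconcilers in the patch follow (hypothetical obj and finalizer name, not part of the patch); the point is that the deferred patch above persists the finalizer before any external state exists, so a delete can never observe the object without it:

    // Deletion: clean up external state, then remove the finalizer.
    if !obj.DeletionTimestamp.IsZero() {
        return ctrl.Result{}, r.reconcileDelete(ctx, obj)
    }
    // First reconcile: only add the finalizer (persisted by the deferred patch)
    // and return; the real work starts on the next reconcile.
    if !controllerutil.ContainsFinalizer(obj, "example.finalizer") {
        controllerutil.AddFinalizer(obj, "example.finalizer")
        return ctrl.Result{}, nil
    }
    return ctrl.Result{}, r.reconcileNormal(ctx, obj)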
+ if !controllerutil.ContainsFinalizer(controlPlaneEndpoint, vcsimv1.ControlPlaneEndpointFinalizer) { + controllerutil.AddFinalizer(controlPlaneEndpoint, vcsimv1.ControlPlaneEndpointFinalizer) + return ctrl.Result{}, nil + } + + // Handle non-deleted ControlPlaneEndpoints + return ctrl.Result{}, r.reconcileNormal(ctx, controlPlaneEndpoint) +} + +func (r *ControlPlaneEndpointReconciler) reconcileNormal(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VCSim ControlPlaneEndpoint") + + // NOTE: The name of the ControlPlaneEndpoint should match the name of the Cluster. + resourceGroup := klog.KObj(controlPlaneEndpoint).String() + + // Initialize a listener for the workload cluster. + // IMPORTANT: The fact that both the listener and the resourceGroup for a workload cluster have + // the same name is used as an assumption in other parts of the implementation. + listener, err := r.APIServerMux.InitWorkloadClusterListener(resourceGroup) + if err != nil { + return errors.Wrapf(err, "failed to init the listener for the control plane endpoint") + } + + // Create a resource group for all the resources belonging to the workload cluster. + // NOTE: We are storing in this resource group all the Kubernetes resources that are expected to exist on the workload cluster (e.g. Nodes). + r.InMemoryManager.AddResourceGroup(resourceGroup) + + controlPlaneEndpoint.Status.Host = r.PodIP // NOTE: we are replacing the listener IP with the pod IP so it will be accessible from other pods as well + controlPlaneEndpoint.Status.Port = listener.Port() + + return nil +} + +func (r *ControlPlaneEndpointReconciler) reconcileDelete(ctx context.Context, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling delete VCSim ControlPlaneEndpoint") + + // NOTE: The name of the ControlPlaneEndpoint should match the name of the Cluster. + resourceGroup := klog.KObj(controlPlaneEndpoint).String() + + // Delete the listener for the workload cluster. + if err := r.APIServerMux.DeleteWorkloadClusterListener(resourceGroup); err != nil { + return errors.Wrapf(err, "failed to delete the listener for the control plane endpoint") + } + + // Delete the resource group hosting all the cloud resources belonging to the workload cluster. + r.InMemoryManager.DeleteResourceGroup(resourceGroup) + + controllerutil.RemoveFinalizer(controlPlaneEndpoint, vcsimv1.ControlPlaneEndpointFinalizer) + + return nil +} + +// SetupWithManager will add watches for this controller. +func (r *ControlPlaneEndpointReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + err := ctrl.NewControllerManagedBy(mgr). + For(&vcsimv1.ControlPlaneEndpoint{}). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + Complete(r) + + if err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + return nil +} diff --git a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go new file mode 100644 index 0000000000..446f6c8045 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2024 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" + inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +func Test_Reconcile_ControlPlaneEndpoint(t *testing.T) { + g := NewWithT(t) + + // Start a manager to handle resources that we are going to store in the fake API servers for the workload clusters. + workloadClustersManager := inmemoryruntime.NewManager(cloudScheme) + err := workloadClustersManager.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + + // Start a Mux for the API servers for the workload clusters. + podIP := "127.0.0.1" + workloadClustersMux, err := inmemoryserver.NewWorkloadClustersMux(workloadClustersManager, podIP, inmemoryserver.CustomPorts{ + // NOTE: make sure to use ports different from other tests, so we can run tests in parallel + MinPort: inmemoryserver.DefaultMinPort + 100, + MaxPort: inmemoryserver.DefaultMinPort + 199, + DebugPort: inmemoryserver.DefaultDebugPort + 1, + }) + g.Expect(err).ToNot(HaveOccurred()) + + controlPlaneEndpoint := &vcsimv1.ControlPlaneEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + Finalizers: []string{ + vcsimv1.ControlPlaneEndpointFinalizer, // Adding this to move past the first reconcile + }, + }, + } + + crclient := fake.NewClientBuilder().WithObjects(controlPlaneEndpoint).WithStatusSubresource(controlPlaneEndpoint).WithScheme(scheme).Build() + r := &ControlPlaneEndpointReconciler{ + Client: crclient, + InMemoryManager: workloadClustersManager, + APIServerMux: workloadClustersMux, + PodIP: podIP, + } + + // PART 1: Should create a new ControlPlaneEndpoint + + res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: controlPlaneEndpoint.Namespace, + Name: controlPlaneEndpoint.Name, + }}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res).To(Equal(ctrl.Result{})) + + // Gets the reconciled object + err = crclient.Get(ctx, client.ObjectKeyFromObject(controlPlaneEndpoint), controlPlaneEndpoint) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(controlPlaneEndpoint.Status.Host).ToNot(BeEmpty()) + g.Expect(controlPlaneEndpoint.Status.Port).ToNot(BeZero()) + + // Check manager and server internal status + resourceGroup := klog.KObj(controlPlaneEndpoint).String() + foo := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + } + g.Expect(workloadClustersManager.GetResourceGroup(resourceGroup).GetClient().Create(ctx, foo)).To(Succeed()) // the operation succeeds if the
resource group has been created as expected + g.Expect(workloadClustersMux.ListListeners()).To(HaveKey(resourceGroup)) + + // PART 2: Should delete a ControlPlaneEndpoint + + err = crclient.Delete(ctx, controlPlaneEndpoint) + g.Expect(err).ToNot(HaveOccurred()) + + res, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: controlPlaneEndpoint.Namespace, + Name: controlPlaneEndpoint.Name, + }}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res).To(Equal(ctrl.Result{})) + + // Check manager and server internal status + g.Expect(workloadClustersManager.GetResourceGroup(resourceGroup).GetClient().Create(ctx, foo)).ToNot(Succeed()) // the operation fails if the resource group has been deleted as expected + g.Expect(workloadClustersMux.ListListeners()).ToNot(HaveKey(resourceGroup)) +} diff --git a/test/infrastructure/vcsim/controllers/doc.go b/test/infrastructure/vcsim/controllers/doc.go new file mode 100644 index 0000000000..7c2c9f65bb --- /dev/null +++ b/test/infrastructure/vcsim/controllers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package controllers implements reconcilers for the vcsim controller. +package controllers diff --git a/test/infrastructure/vcsim/controllers/envvar_controller.go b/test/infrastructure/vcsim/controllers/envvar_controller.go new file mode 100644 index 0000000000..b0e7d39534 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/envvar_controller.go @@ -0,0 +1,250 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "fmt" + "strconv" + "strings" + "sync" + + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + apierrors "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +type EnvVarReconciler struct { + Client client.Client + SupervisorMode bool + + PodIP string + sshKeys map[string]string + lock sync.RWMutex + + // WatchFilterValue is the label value used to filter events prior to reconciliation. 
+ WatchFilterValue string +} + +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=envvars,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=envvars/status,verbs=get;update;patch + +func (r *EnvVarReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + + // Fetch the EnvVar instance + envVar := &vcsimv1.EnvVar{} + if err := r.Client.Get(ctx, req.NamespacedName, envVar); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Fetch the VCenterSimulator instance + if envVar.Spec.VCenterSimulator == "" { + return ctrl.Result{}, errors.New("Spec.VCenterSimulator cannot be empty") + } + + vCenterSimulator := &vcsimv1.VCenterSimulator{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: envVar.Namespace, + Name: envVar.Spec.VCenterSimulator, + }, vCenterSimulator); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get VCenter") + } + log = log.WithValues("VCenter", klog.KObj(vCenterSimulator)) + ctx = ctrl.LoggerInto(ctx, log) + + // Fetch the ControlPlaneEndpoint instance + if envVar.Spec.Cluster.Name == "" { + return ctrl.Result{}, errors.New("Spec.Cluster.Name cannot be empty") + } + + controlPlaneEndpoint := &vcsimv1.ControlPlaneEndpoint{} + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: envVar.Namespace, + Name: envVar.Spec.Cluster.Name, + }, controlPlaneEndpoint); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get ControlPlaneEndpoint") + } + log = log.WithValues("ControlPlaneEndpoint", klog.KObj(controlPlaneEndpoint)) + ctx = ctrl.LoggerInto(ctx, log) + + // Initialize the patch helper + patchHelper, err := patch.NewHelper(envVar, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // Always attempt to Patch the EnvVar object and status after each reconciliation. + defer func() { + if err := patchHelper.Patch(ctx, envVar); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + // Handle deleted EnvVars + if !controlPlaneEndpoint.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, envVar, vCenterSimulator, controlPlaneEndpoint) + } + + // Handle non-deleted EnvVars + return ctrl.Result{}, r.reconcileNormal(ctx, envVar, vCenterSimulator, controlPlaneEndpoint) +} + +func (r *EnvVarReconciler) reconcileNormal(ctx context.Context, envVar *vcsimv1.EnvVar, vCenterSimulator *vcsimv1.VCenterSimulator, controlPlaneEndpoint *vcsimv1.ControlPlaneEndpoint) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VCSim EnvVar") + + r.lock.Lock() + defer r.lock.Unlock() + + if r.sshKeys == nil { + r.sshKeys = map[string]string{} + } + + key := klog.KObj(vCenterSimulator).String() + sshKey, ok := r.sshKeys[key] + if !ok { + bitSize := 4096 + + privateKey, err := generatePrivateKey(bitSize) + if err != nil { + return errors.Wrapf(err, "failed to generate private key") + } + + publicKeyBytes, err := generatePublicKey(&privateKey.PublicKey) + if err != nil { + return errors.Wrapf(err, "failed to generate public key") + } + + sshKey = string(publicKeyBytes) + r.sshKeys[key] = sshKey + log.Info("Created ssh authorized key") + } + + // Common variables (used both in supervisor and govmomi mode) + envVar.Status.Variables = map[string]string{ + // cluster template variables about the vcsim instance.
+ "VSPHERE_PASSWORD": vCenterSimulator.Status.Password, + "VSPHERE_USERNAME": vCenterSimulator.Status.Username, + + // Variables for machines ssh key + "VSPHERE_SSH_AUTHORIZED_KEY": sshKey, + + // other variables required by the cluster template. + "NAMESPACE": vCenterSimulator.Namespace, + "CLUSTER_NAME": envVar.Spec.Cluster.Name, + "KUBERNETES_VERSION": ptr.Deref(envVar.Spec.Cluster.KubernetesVersion, "v1.28.0"), + "CONTROL_PLANE_MACHINE_COUNT": strconv.Itoa(ptr.Deref(envVar.Spec.Cluster.ControlPlaneMachines, 1)), + "WORKER_MACHINE_COUNT": strconv.Itoa(ptr.Deref(envVar.Spec.Cluster.WorkerMachines, 1)), + + // variables for the fake APIServer endpoint + "CONTROL_PLANE_ENDPOINT_IP": controlPlaneEndpoint.Status.Host, + "CONTROL_PLANE_ENDPOINT_PORT": strconv.Itoa(controlPlaneEndpoint.Status.Port), + + // variables to set up govc for working with the vcsim instance. + "GOVC_URL": fmt.Sprintf("https://%s:%s@%s/sdk", vCenterSimulator.Status.Username, vCenterSimulator.Status.Password, strings.Replace(vCenterSimulator.Status.Host, r.PodIP, "127.0.0.1", 1)), // NOTE: reverting back to local host because the assumption is that the vcsim pod will be port-forwarded on local host + "GOVC_INSECURE": "true", + } + + if r.SupervisorMode { + // Variables used only in supervisor mode + envVar.Status.Variables["VSPHERE_STORAGE_POLICY"] = "vcsim-default" + envVar.Status.Variables["VSPHERE_MACHINE_CLASS_NAME"] = "best-effort-2xlarge" + envVar.Status.Variables["VSPHERE_POWER_OFF_MODE"] = "trySoft" + envVar.Status.Variables["VSPHERE_IMAGE_NAME"] = "test-image-ovf" + envVar.Status.Variables["VSPHERE_STORAGE_CLASS"] = "vcsim-default" + return nil + } + + // Variables used only in govmomi mode + + // cluster template variables about the vcsim instance. + envVar.Status.Variables["VSPHERE_SERVER"] = fmt.Sprintf("https://%s", vCenterSimulator.Status.Host) + envVar.Status.Variables["VSPHERE_TLS_THUMBPRINT"] = vCenterSimulator.Status.Thumbprint + envVar.Status.Variables["VSPHERE_DATACENTER"] = vcsimDatacenterName(ptr.Deref(envVar.Spec.Cluster.Datacenter, 0)) + envVar.Status.Variables["VSPHERE_DATASTORE"] = vcsimDatastoreName(ptr.Deref(envVar.Spec.Cluster.Datastore, 0)) + envVar.Status.Variables["VSPHERE_FOLDER"] = fmt.Sprintf("/DC%d/vm", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0)) + envVar.Status.Variables["VSPHERE_NETWORK"] = fmt.Sprintf("/DC%d/network/VM Network", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0)) + envVar.Status.Variables["VSPHERE_RESOURCE_POOL"] = fmt.Sprintf("/DC%d/host/DC%[1]d_C%d/Resources", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0), ptr.Deref(envVar.Spec.Cluster.Cluster, 0)) + envVar.Status.Variables["VSPHERE_STORAGE_POLICY"] = vcsimDefaultStoragePolicyName + envVar.Status.Variables["VSPHERE_TEMPLATE"] = fmt.Sprintf("/DC%d/vm/%s", ptr.Deref(envVar.Spec.Cluster.Datacenter, 0), vcsimDefaultVMTemplateName) + + return nil +} + +func (r *EnvVarReconciler) reconcileDelete(_ context.Context, _ *vcsimv1.EnvVar, _ *vcsimv1.VCenterSimulator, _ *vcsimv1.ControlPlaneEndpoint) (ctrl.Result, error) { + return ctrl.Result{}, nil +} + +// SetupWithManager will add watches for this controller. +func (r *EnvVarReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + err := ctrl.NewControllerManagedBy(mgr). + For(&vcsimv1.EnvVar{}). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). 
+ Complete(r) + + if err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + return nil +} + +// generatePrivateKey creates an RSA private key of the specified bit size. +func generatePrivateKey(bitSize int) (*rsa.PrivateKey, error) { + // Private Key generation + privateKey, err := rsa.GenerateKey(rand.Reader, bitSize) + if err != nil { + return nil, err + } + + // Validate Private Key + err = privateKey.Validate() + if err != nil { + return nil, err + } + + return privateKey, nil +} + +// generatePublicKey takes an rsa.PublicKey and returns bytes suitable for writing to a .pub file, +// in the format "ssh-rsa ...". +func generatePublicKey(publicKey *rsa.PublicKey) ([]byte, error) { + publicRsaKey, err := ssh.NewPublicKey(publicKey) + if err != nil { + return nil, err + } + + pubKeyBytes := ssh.MarshalAuthorizedKey(publicRsaKey) + + return pubKeyBytes, nil +} diff --git a/test/infrastructure/vcsim/controllers/images/images.go b/test/infrastructure/vcsim/controllers/images/images.go new file mode 100644 index 0000000000..9d736ca76c --- /dev/null +++ b/test/infrastructure/vcsim/controllers/images/images.go @@ -0,0 +1,28 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package images contains fake images for the vcsim content library. +package images + +import ( + _ "embed" +) + +var ( + // SampleOVF image. + //go:embed ttylinux-pc_i486-16.1.ovf + SampleOVF []byte +) diff --git a/test/infrastructure/vcsim/controllers/images/ttylinux-pc_i486-16.1.ovf b/test/infrastructure/vcsim/controllers/images/ttylinux-pc_i486-16.1.ovf new file mode 100644 index 0000000000..ccf48c4979 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/images/ttylinux-pc_i486-16.1.ovf @@ -0,0 +1,94 @@ + + + + + + + + Virtual disk information + + + + The list of logical networks + + The nat network + + + + A virtual machine + ttylinux-pc_i486-16.1 + + The kind of installed guest operating system + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + ttylinux-pc_i486-16.1 + vmx-09 + + + hertz * 10^6 + Number of Virtual CPUs + 1 virtual CPU(s) + 1 + 3 + 1 + + + byte * 2^20 + Memory Size + 32MB of memory + 2 + 4 + 32 + + + 0 + IDE Controller + ideController0 + 3 + 5 + + + 0 + disk0 + ovf:/disk/vmdisk1 + 4 + 3 + 17 + + + 1 + true + nat + E1000 ethernet adapter on "nat" + ethernet0 + 5 + E1000 + 10 + + + + false + video + 6 + 24 + + + false + vmci + 7 + vmware.vmci + 1 + + + + + + + + + + diff --git a/test/infrastructure/vcsim/controllers/vcsim.go b/test/infrastructure/vcsim/controllers/vcsim.go new file mode 100644 index 0000000000..7d0c1aa1bb --- /dev/null +++ b/test/infrastructure/vcsim/controllers/vcsim.go @@ -0,0 +1,102 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/vmware/govmomi/govc/cli" + "github.com/vmware/govmomi/simulator" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +// vcsim objects. + +const ( + vcsimMinVersionForCAPV = "7.0.0" + vcsimDefaultNetworkName = "VM Network" + vcsimDefaultStoragePolicyName = "vSAN Default Storage Policy" + + // Note: for the sake of testing with vcsim the template doesn't really matter (nor the version of K8s hosted on it) + // so we create only a VM template with a well-known name. + vcsimDefaultVMTemplateName = "ubuntu-2204-kube-vX" +) + +func vcsimDatacenterName(datacenter int) string { + return fmt.Sprintf("DC%d", datacenter) +} + +func vcsimClusterName(datacenter, cluster int) string { + return fmt.Sprintf("%s_C%d", vcsimDatacenterName(datacenter), cluster) +} + +func vcsimClusterPath(datacenter, cluster int) string { + return fmt.Sprintf("/%s/host/%s", vcsimDatacenterName(datacenter), vcsimClusterName(datacenter, cluster)) +} + +func vcsimDatastoreName(datastore int) string { + return fmt.Sprintf("LocalDS_%d", datastore) +} + +func vcsimDatastorePath(datacenter, datastore int) string { + return fmt.Sprintf("/%s/datastore/%s", vcsimDatacenterName(datacenter), vcsimDatastoreName(datastore)) +} + +func vcsimResourcePoolPath(datacenter, cluster int) string { + return fmt.Sprintf("/%s/host/%s/Resources", vcsimDatacenterName(datacenter), vcsimClusterName(datacenter, cluster)) +} + +func vcsimVMFolderName(datacenter int) string { + return fmt.Sprintf("%s/vm", vcsimDatacenterName(datacenter)) +} + +func vcsimVMPath(datacenter int, vm string) string { + return fmt.Sprintf("/%s/%s", vcsimVMFolderName(datacenter), vm) +} + +func createVMTemplate(ctx context.Context, vCenterSimulator *vcsimv1.VCenterSimulator) error { + log := ctrl.LoggerFrom(ctx) + govcURL := fmt.Sprintf("https://%s:%s@%s/sdk", vCenterSimulator.Status.Username, vCenterSimulator.Status.Password, vCenterSimulator.Status.Host) + + // TODO: Investigate how templates are supposed to work + // we create a template in a datastore, what if many? + // we create a template in a cluster, but the generated vm doesn't have the cluster in the path. What if I have many clusters?
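For orientation, the cli.Run invocations in the loop below are the in-process equivalent of running the govc CLI against the simulator; with the default model (datacenter 0, cluster 0, datastore 0) the arguments resolve roughly to the following, where the placeholders stand for the credentials and host taken from the vCenterSimulator status:

    // govc vm.create -ds=LocalDS_0 -cluster=DC0_C0 -net="VM Network" -disk=20G -on=false -k=true -u=https://<user>:<pass>@<host>/sdk ubuntu-2204-kube-vX
    // govc vm.markastemplate -k=true -u=https://<user>:<pass>@<host>/sdk /DC0/vm/ubuntu-2204-kube-vX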
+ cluster := 0 + datastore := 0 + datacenters := 1 + if vCenterSimulator.Spec.Model != nil { + datacenters = ptr.Deref(vCenterSimulator.Spec.Model.Datacenter, simulator.VPX().Datacenter) // VPX is the same base model used when creating vcsim + } + for dc := 0; dc < datacenters; dc++ { + exit := cli.Run([]string{"vm.create", fmt.Sprintf("-ds=%s", vcsimDatastoreName(datastore)), fmt.Sprintf("-cluster=%s", vcsimClusterName(dc, cluster)), fmt.Sprintf("-net=%s", vcsimDefaultNetworkName), "-disk=20G", "-on=false", "-k=true", fmt.Sprintf("-u=%s", govcURL), vcsimDefaultVMTemplateName}) + if exit != 0 { + return errors.New("failed to create vm template") + } + + exit = cli.Run([]string{"vm.markastemplate", "-k=true", fmt.Sprintf("-u=%s", govcURL), vcsimVMPath(dc, vcsimDefaultVMTemplateName)}) + if exit != 0 { + return errors.New("failed to mark vm template") + } + log.Info("Created VM template", "name", vcsimDefaultVMTemplateName) + } + return nil +} diff --git a/test/infrastructure/vcsim/controllers/vcsim_controller.go b/test/infrastructure/vcsim/controllers/vcsim_controller.go new file mode 100644 index 0000000000..ada75cd101 --- /dev/null +++ b/test/infrastructure/vcsim/controllers/vcsim_controller.go @@ -0,0 +1,361 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "crypto/sha1" //nolint: gosec + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/url" + "strings" + "sync" + + _ "github.com/dougm/pretty" // NOTE: this is required to add commands vm.* to cli.Run + "github.com/pkg/errors" + _ "github.com/vmware/govmomi/govc/vm" // NOTE: this is required to add commands vm.* to cli.Run + pbmsimulator "github.com/vmware/govmomi/pbm/simulator" + "github.com/vmware/govmomi/simulator" + _ "github.com/vmware/govmomi/vapi/simulator" // NOTE: this is required to add content library & other vapi methods to the simulator + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim" + "sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vmoperator" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers/images" +) + +const ( + // vmIP reconciler secret.
+ + netConfigMapName = "vsphere.provider.config.netoperator.vmware.com" + netConfigServerURLKey = "server" + netConfigDatacenterKey = "datacenter" + netConfigUsernameKey = "username" + netConfigPasswordKey = "password" + netConfigThumbprintKey = "thumbprint" +) + +type VCenterSimulatorReconciler struct { + Client client.Client + SupervisorMode bool + + vcsimInstances map[string]*vcsim.Simulator + lock sync.RWMutex + + // WatchFilterValue is the label value used to filter events prior to reconciliation. + WatchFilterValue string +} + +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=vcentersimulators,verbs=get;list;watch;patch +// +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=vcentersimulators/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=topology.tanzu.vmware.com,resources=availabilityzones,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachineclasses,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachineclassbindings,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=contentlibraryproviders,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=contentsources,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=contentsourcebindings,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachineimages,verbs=get;list;watch;create +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch;create +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create +// +kubebuilder:rbac:groups="",resources=resourcequotas,verbs=get;list;watch;create + +func (r *VCenterSimulatorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + // Fetch the VCenterSimulator instance + vCenterSimulator := &vcsimv1.VCenterSimulator{} + if err := r.Client.Get(ctx, req.NamespacedName, vCenterSimulator); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Initialize the patch helper + patchHelper, err := patch.NewHelper(vCenterSimulator, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // Always attempt to Patch the VCenterSimulator object and status after each reconciliation. + defer func() { + if err := patchHelper.Patch(ctx, vCenterSimulator); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + // Handle deleted VCenterSimulators + if !vCenterSimulator.DeletionTimestamp.IsZero() { + r.reconcileDelete(ctx, vCenterSimulator) + return ctrl.Result{}, nil + } + + // Add finalizer first if not set to avoid the race condition between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp is not set.
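The +kubebuilder:rbac markers above are the source from which config/rbac/role.yaml earlier in this patch is generated: controller-gen collects every marker in the package and emits one ClusterRole rule per group/resource pair, with the verbs normalized, as can be seen in role.yaml. For example, the vcentersimulators marker corresponds to this rule (shown here as a comment; the generated YAML lives in role.yaml):

    // +kubebuilder:rbac:groups=vcsim.infrastructure.cluster.x-k8s.io,resources=vcentersimulators,verbs=get;list;watch;patch
    //
    // generates:
    //
    //   - apiGroups:
    //     - vcsim.infrastructure.cluster.x-k8s.io
    //     resources:
    //     - vcentersimulators
    //     verbs:
    //     - get
    //     - list
    //     - patch
    //     - watch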
+ if !controllerutil.ContainsFinalizer(vCenterSimulator, vcsimv1.VCenterFinalizer) { + controllerutil.AddFinalizer(vCenterSimulator, vcsimv1.VCenterFinalizer) + return ctrl.Result{}, nil + } + + // Handle non-deleted VCenterSimulators + return ctrl.Result{}, r.reconcileNormal(ctx, vCenterSimulator) +} + +func (r *VCenterSimulatorReconciler) reconcileNormal(ctx context.Context, vCenterSimulator *vcsimv1.VCenterSimulator) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VCenter") + + r.lock.Lock() + defer r.lock.Unlock() + + if r.vcsimInstances == nil { + r.vcsimInstances = map[string]*vcsim.Simulator{} + } + + key := klog.KObj(vCenterSimulator).String() + if _, ok := r.vcsimInstances[key]; !ok { + // Define the model for the VCSim instance, starting from simulator.VPX + // and changing the version + all the settings specified in the spec. + // NOTE: it is necessary to create the model before passing it to the builder + // in order to register the endpoint for handling requests about storage policies. + model := simulator.VPX() + model.ServiceContent.About.Version = vcsimMinVersionForCAPV + if vCenterSimulator.Spec.Model != nil { + model.ServiceContent.About.Version = ptr.Deref(vCenterSimulator.Spec.Model.VSphereVersion, model.ServiceContent.About.Version) + model.Datacenter = ptr.Deref(vCenterSimulator.Spec.Model.Datacenter, model.Datacenter) + model.Cluster = ptr.Deref(vCenterSimulator.Spec.Model.Cluster, model.Cluster) + model.ClusterHost = ptr.Deref(vCenterSimulator.Spec.Model.ClusterHost, model.ClusterHost) + model.Pool = ptr.Deref(vCenterSimulator.Spec.Model.Pool, model.Pool) + model.Datastore = ptr.Deref(vCenterSimulator.Spec.Model.Datastore, model.Datastore) + } + if err := model.Create(); err != nil { + return errors.Wrapf(err, "failed to create vcsim server model") + } + model.Service.RegisterSDK(pbmsimulator.New()) + + // Compute the vcsim URL, binding all interfaces (so it will be accessible both from other pods via the pod IP and via kubectl port-forward on localhost); + // a random port will be used unless we are reconciling a previously existing vCenterSimulator after a restart; + // in case of restart it will try to re-use the port previously assigned, but the internal status of vcsim will be lost. + // NOTE: re-using the same port might be racy with other vcsimURL being created using a random port, + // but we consider this risk acceptable for testing purposes. + host := "0.0.0.0" + port := "0" + if vCenterSimulator.Status.Host != "" { + _, port, _ = net.SplitHostPort(vCenterSimulator.Status.Host) + } + vcsimURL, err := url.Parse(fmt.Sprintf("https://%s", net.JoinHostPort(host, port))) + if err != nil { + return errors.Wrapf(err, "failed to parse vcsim server url") + } + + // Start the vcsim instance + vcsimInstance, err := vcsim.NewBuilder(). + WithModel(model). + SkipModelCreate(). + WithURL(vcsimURL). + Build() + + if err != nil { + return errors.Wrapf(err, "failed to create vcsim server instance") + } + r.vcsimInstances[key] = vcsimInstance + log.Info("Created vcsim server", "url", vcsimInstance.ServerURL()) + + vCenterSimulator.Status.Host = vcsimInstance.ServerURL().Host + vCenterSimulator.Status.Username = vcsimInstance.Username() + vCenterSimulator.Status.Password = vcsimInstance.Password() + + // Add a VM template + // Note: for the sake of testing with vcsim the template doesn't really matter (nor the version of K8s hosted on it) + // so we create only a VM template with a well-known name.
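All the model fields above are optional pointers, so a manifest only needs to set what it wants to override. As a sketch, a VCenterSimulator asking for two datacenters could be built like this (the model struct name is assumed here, since the api/v1alpha1 types are not shown in this excerpt):

    sim := &vcsimv1.VCenterSimulator{
        ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "vcsim"},
        Spec: vcsimv1.VCenterSimulatorSpec{
            Model: &vcsimv1.VCenterSimulatorModel{ // assumed type name
                Datacenter: ptr.To(2), // DC0 and DC1, each with the default clusters and datastores
            },
        },
    }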
+ if err := createVMTemplate(ctx, vCenterSimulator); err != nil { + return err + } + } + + if vCenterSimulator.Status.Thumbprint == "" { + // Compute the Thumbprint out of the certificate self-generated by vcsim. + config := &tls.Config{InsecureSkipVerify: true} //nolint: gosec + addr := vCenterSimulator.Status.Host + conn, err := tls.Dial("tcp", addr, config) + if err != nil { + return errors.Wrapf(err, "failed to connect to vcsim server instance to infer thumbprint") + } + defer conn.Close() + + cert := conn.ConnectionState().PeerCertificates[0] + vCenterSimulator.Status.Thumbprint = ThumbprintSHA1(cert) + } + + if r.SupervisorMode { + // In order to run the vm-operator in standalone mode it is required to provide it with the dependencies it needs to work: + // - A set of objects/configurations in the vCenterSimulator cluster the vm-operator is pointing to + // - A set of Kubernetes objects the vm-operator relies on + + // To mimic the supervisor cluster, there will be only one vm-operator instance for each management cluster; + // also, the logic below should consider that the instance of the vm-operator is bound to a specific vCenterSimulator cluster. + + // This is the config for vCenterSimulator cluster DC0/C0, datastore LocalDS_0 in vcsim. + datacenter := 0 + cluster := 0 + datastore := 0 + + config := vmoperator.Dependencies{ + // This is where tilt deploys the vm-operator + Namespace: vmoperator.DefaultNamespace, + + VCenterCluster: vmoperator.VCenterClusterConfig{ + ServerURL: vCenterSimulator.Status.Host, + Username: vCenterSimulator.Status.Username, + Password: vCenterSimulator.Status.Password, + Thumbprint: vCenterSimulator.Status.Thumbprint, + Datacenter: vcsimDatacenterName(datacenter), + Cluster: vcsimClusterPath(datacenter, cluster), + Folder: vcsimVMFolderName(datacenter), + ResourcePool: vcsimResourcePoolPath(datacenter, cluster), + StoragePolicyID: vcsimDefaultStoragePolicyName, + + // Those are settings for a fake content library we are going to create given that it doesn't exist in vcsim by default. + ContentLibrary: vmoperator.ContentLibraryConfig{ + Name: "kubernetes", + Datastore: vcsimDatastorePath(datacenter, datastore), + Item: vmoperator.ContentLibraryItemConfig{ + Name: "test-image-ovf", + Files: []vmoperator.ContentLibraryItemFilesConfig{ // TODO: check if we really need both + { + Name: "ttylinux-pc_i486-16.1.ovf", + Content: images.SampleOVF, + }, + }, + ItemType: "ovf", + ProductInfo: "dummy-productInfo", + OSInfo: "dummy-OSInfo", + }, + }, + }, + + // The users are expected to store Cluster API clusters to be managed by the vm-operator + // in the default namespace and to use the "vcsim-default" storage class. + UserNamespace: vmoperator.UserNamespaceConfig{ + Name: corev1.NamespaceDefault, + StorageClass: "vcsim-default", + }, + } + + if err := vmoperator.ReconcileDependencies(ctx, r.Client, config); err != nil { + return err + } + + // The vm-operator doesn't take care of the networking part of the VM, which is usually + // managed by other components in the supervisor cluster. + // In order to make things work in vcsim, there is the vmIP reconciler, which requires + // some info about the vcsim instance; in order to do so, we are creating a Secret.
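To illustrate how the thumbprint computed above is meant to be consumed, a client can pin the vcsim self-signed certificate instead of trusting a CA. A minimal sketch reusing the ThumbprintSHA1 helper defined later in this file (the dialWithThumbprint name is ours, for illustration only):

    func dialWithThumbprint(addr, thumbprint string) (*tls.Conn, error) {
        cfg := &tls.Config{
            InsecureSkipVerify: true, // chain validation is replaced by the pinning below
            VerifyPeerCertificate: func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
                cert, err := x509.ParseCertificate(rawCerts[0])
                if err != nil {
                    return err
                }
                if ThumbprintSHA1(cert) != thumbprint {
                    return errors.Errorf("thumbprint mismatch for %s", addr)
                }
                return nil
            },
        }
        return tls.Dial("tcp", addr, cfg)
    }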
+ + if err := addPreRequisitesForVMIPreconciler(ctx, r.Client, config); err != nil { + return err + } + } + + return nil +} + +func addPreRequisitesForVMIPreconciler(ctx context.Context, c client.Client, config vmoperator.Dependencies) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling requirements for the Fake net-operator Deployment") + + netOperatorSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: netConfigMapName, + Namespace: config.Namespace, + }, + StringData: map[string]string{ + netConfigServerURLKey: config.VCenterCluster.ServerURL, + netConfigDatacenterKey: config.VCenterCluster.Datacenter, + netConfigUsernameKey: config.VCenterCluster.Username, + netConfigPasswordKey: config.VCenterCluster.Password, + netConfigThumbprintKey: config.VCenterCluster.Thumbprint, + }, + Type: corev1.SecretTypeOpaque, + } + if err := c.Get(ctx, client.ObjectKeyFromObject(netOperatorSecret), netOperatorSecret); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get net-operator Secret %s", netOperatorSecret.Name) + } + if err := c.Create(ctx, netOperatorSecret); err != nil { + return errors.Wrapf(err, "failed to create net-operator Secret %s", netOperatorSecret.Name) + } + log.Info("Created net-operator Secret", "Secret", klog.KObj(netOperatorSecret)) + } + + return nil +} + +func (r *VCenterSimulatorReconciler) reconcileDelete(ctx context.Context, vCenterSimulator *vcsimv1.VCenterSimulator) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling delete VCenter server") + + r.lock.Lock() + defer r.lock.Unlock() + + key := klog.KObj(vCenterSimulator).String() + vcsimInstance, ok := r.vcsimInstances[key] + if ok { + log.Info("Deleting vcsim server") + vcsimInstance.Destroy() + delete(r.vcsimInstances, key) + } + + controllerutil.RemoveFinalizer(vCenterSimulator, vcsimv1.VCenterFinalizer) +} + +// SetupWithManager will add watches for this controller. +func (r *VCenterSimulatorReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + err := ctrl.NewControllerManagedBy(mgr). + For(&vcsimv1.VCenterSimulator{}). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + Complete(r) + + if err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + + return nil +} + +// ThumbprintSHA1 returns the thumbprint of the given cert in the same format used by the SDK and Client.SetThumbprint. +func ThumbprintSHA1(cert *x509.Certificate) string { + sum := sha1.Sum(cert.Raw) //nolint: gosec + hex := make([]string, len(sum)) + for i, b := range sum { + hex[i] = fmt.Sprintf("%02X", b) + } + return strings.Join(hex, ":") +} diff --git a/test/infrastructure/vcsim/controllers/vcsim_controller_test.go b/test/infrastructure/vcsim/controllers/vcsim_controller_test.go new file mode 100644 index 0000000000..e47345ab7e --- /dev/null +++ b/test/infrastructure/vcsim/controllers/vcsim_controller_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "testing" + + _ "github.com/dougm/pretty" + . "github.com/onsi/gomega" + _ "github.com/vmware/govmomi/govc/vm" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +func Test_Reconcile_Server(t *testing.T) { + g := NewWithT(t) + + vCenterSimulator := &vcsimv1.VCenterSimulator{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "foo", + Finalizers: []string{ + vcsimv1.VCenterFinalizer, // Adding this to move past the first reconcile + }, + }, + Spec: vcsimv1.VCenterSimulatorSpec{}, + } + + crclient := fake.NewClientBuilder().WithObjects(vCenterSimulator).WithStatusSubresource(vCenterSimulator).WithScheme(scheme).Build() + r := &VCenterSimulatorReconciler{ + Client: crclient, + } + + // PART 1: Should create a new VCenter + + res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: vCenterSimulator.Namespace, + Name: vCenterSimulator.Name, + }}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res).To(Equal(ctrl.Result{})) + + // Check the VCenter instance has been created in the reconciler internal status + func() { + r.lock.RLock() + defer r.lock.RUnlock() + + key := klog.KObj(vCenterSimulator).String() + g.Expect(r.vcsimInstances).To(HaveKey(key)) + g.Expect(r.vcsimInstances[key]).ToNot(BeNil()) + }() + + // Gets the reconciled object and tests if the VCenter instance actually works + err = crclient.Get(ctx, client.ObjectKeyFromObject(vCenterSimulator), vCenterSimulator) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(vCenterSimulator.Status.Host).ToNot(BeEmpty()) + g.Expect(vCenterSimulator.Status.Username).ToNot(BeEmpty()) + g.Expect(vCenterSimulator.Status.Password).ToNot(BeEmpty()) + + params := session.NewParams(). + WithServer(vCenterSimulator.Status.Host). + WithThumbprint(vCenterSimulator.Status.Thumbprint). 
+ WithUserInfo(vCenterSimulator.Status.Username, vCenterSimulator.Status.Password) + + s, err := session.GetOrCreate(ctx, params) + g.Expect(err).ToNot(HaveOccurred()) + + v, err := s.GetVersion() + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(v).ToNot(BeEmpty()) + + dc, err := s.Finder.Datacenter(ctx, "DC0") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(dc).ToNot(BeNil()) + + // PART 2: Should delete a VCenter + err = crclient.Delete(ctx, vCenterSimulator) + g.Expect(err).ToNot(HaveOccurred()) + + res, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: vCenterSimulator.Namespace, + Name: vCenterSimulator.Name, + }}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res).To(Equal(ctrl.Result{})) + + // Check the VCenter instance has been removed from the reconciler internal status + func() { + r.lock.RLock() + defer r.lock.RUnlock() + + key := klog.KObj(vCenterSimulator).String() + g.Expect(r.vcsimInstances).ToNot(HaveKey(key)) + }() +} diff --git a/test/infrastructure/vcsim/controllers/vcsim_test.go b/test/infrastructure/vcsim/controllers/vcsim_test.go new file mode 100644 index 0000000000..edd5a5a9bc --- /dev/null +++ b/test/infrastructure/vcsim/controllers/vcsim_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "testing" + + . "github.com/onsi/gomega" +) + +func Test_vcsim_NamesAndPath(t *testing.T) { + g := NewWithT(t) + + datacenter := 5 + cluster := 3 + datastore := 7 + + g.Expect(vcsimDatacenterName(datacenter)).To(Equal("DC5")) + g.Expect(vcsimClusterName(datacenter, cluster)).To(Equal("DC5_C3")) + g.Expect(vcsimClusterPath(datacenter, cluster)).To(Equal("/DC5/host/DC5_C3")) + g.Expect(vcsimDatastoreName(datastore)).To(Equal("LocalDS_7")) + g.Expect(vcsimDatastorePath(datacenter, datastore)).To(Equal("/DC5/datastore/LocalDS_7")) + g.Expect(vcsimResourcePoolPath(datacenter, cluster)).To(Equal("/DC5/host/DC5_C3/Resources")) + g.Expect(vcsimVMFolderName(datacenter)).To(Equal("DC5/vm")) + g.Expect(vcsimVMPath(datacenter, "my-mv")).To(Equal("/DC5/vm/my-mv")) +} + +func Test_createVMTemplate(_ *testing.T) { + // TODO: implement +} diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go new file mode 100644 index 0000000000..3516704e4a --- /dev/null +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go @@ -0,0 +1,325 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"time"
+
+	"github.com/pkg/errors"
+	vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiutil "sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/patch"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
+	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
+	"sigs.k8s.io/cluster-api-provider-vsphere/test/framework/vmoperator"
+	inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime"
+	inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server"
+)
+
+type VirtualMachineReconciler struct {
+	Client            client.Client
+	InMemoryManager   inmemoryruntime.Manager
+	APIServerMux      *inmemoryserver.WorkloadClustersMux
+	EnableKeepAlive   bool
+	KeepAliveDuration time.Duration
+
+	// WatchFilterValue is the label value used to filter events prior to reconciliation.
+	WatchFilterValue string
+}
+
+// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachines,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=vmware.infrastructure.cluster.x-k8s.io,resources=vsphereclusters,verbs=get;list;watch
+// +kubebuilder:rbac:groups=vmware.infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch
+
+// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
+func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// Fetch the VirtualMachine instance.
+	virtualMachine := &vmoprv1.VirtualMachine{}
+	if err := r.Client.Get(ctx, req.NamespacedName, virtualMachine); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the owner VSphereMachine.
+	vSphereMachine, err := util.GetOwnerVMWareMachine(ctx, r.Client, virtualMachine.ObjectMeta)
+	// vSphereMachine can be nil when a custom mover other than clusterctl moves the resources
+	// without owner references set; in that case a nil vSphereMachine would cause a panic and
+	// CrashLoopBackOff the pod, preventing the vspheremachine controller from ever setting the
+	// owner reference.
+	if err != nil || vSphereMachine == nil {
+		log.Info("Owner VSphereMachine not found, won't reconcile")
+		return reconcile.Result{}, err
+	}
+	log = log.WithValues("VSphereMachine", klog.KObj(vSphereMachine))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Fetch the Machine.
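+	// NOTE: the ownership chain walked by this reconciler is
+	// VirtualMachine -> VSphereMachine -> Machine -> Cluster (-> VSphereCluster);
+	// each lookup below returns early when the next link in the chain is not set yet.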
+	machine, err := capiutil.GetOwnerMachine(ctx, r.Client, vSphereMachine.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if machine == nil {
+		log.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
+		return ctrl.Result{}, nil
+	}
+	log = log.WithValues("Machine", klog.KObj(machine))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Fetch the Cluster.
+	cluster, err := capiutil.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
+	if err != nil {
+		log.Info("VSphereMachine owner Machine is missing cluster label or cluster does not exist")
+		return ctrl.Result{}, err
+	}
+	if cluster == nil {
+		log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: ", clusterv1.ClusterNameLabel))
+		return ctrl.Result{}, nil
+	}
+	log = log.WithValues("Cluster", klog.KObj(cluster))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Return early if the object or Cluster is paused.
+	if annotations.IsPaused(cluster, virtualMachine) {
+		log.Info("Reconciliation is paused for this object")
+		return ctrl.Result{}, nil
+	}
+
+	// Fetch the VSphereCluster.
+	key := client.ObjectKey{
+		Namespace: cluster.Namespace,
+		Name:      cluster.Spec.InfrastructureRef.Name,
+	}
+	vsphereCluster := &vmwarev1.VSphereCluster{}
+	if err := r.Client.Get(ctx, key, vsphereCluster); err != nil {
+		log.Info("VSphereCluster can't be retrieved")
+		return ctrl.Result{}, err
+	}
+	log = log.WithValues("VSphereCluster", klog.KObj(vsphereCluster))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Compute the resource group unique name.
+	// NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity.
+	resourceGroup := klog.KObj(cluster).String()
+
+	// Check if there is a conditionsTracker in the resource group.
+	// The conditionsTracker is an object stored in memory whose only purpose is to hold the
+	// conditions used for keeping track of the provisioning process of the fake node, etcd,
+	// api server, etc. for this specific virtualMachine (the process managed by this controller).
+	inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient()
+	// NOTE: The type of the in memory conditionsTracker object doesn't matter as long as it implements Cluster API's conditions interfaces.
+	// Unfortunately vmoprv1.VirtualMachine isn't a condition getter, so we fall back to using an infrav1.VSphereVM.
+	conditionsTracker := &infrav1.VSphereVM{}
+	if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker); err != nil {
+		if !apierrors.IsNotFound(err) {
+			return ctrl.Result{}, errors.Wrap(err, "failed to get conditionsTracker")
+		}
+
+		conditionsTracker = &infrav1.VSphereVM{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      virtualMachine.Name,
+				Namespace: virtualMachine.Namespace,
+			},
+		}
+		if err := inmemoryClient.Create(ctx, conditionsTracker); err != nil {
+			return ctrl.Result{}, errors.Wrap(err, "failed to create conditionsTracker")
+		}
+	}
+
+	// Initialize the patch helper.
+	patchHelper, err := patch.NewHelper(virtualMachine, r.Client)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Always attempt to Patch the VirtualMachine + conditionsTracker object and status after each reconciliation.
+	defer func() {
+		// NOTE: Patch on VirtualMachine will only add/remove a finalizer.
+		if err := patchHelper.Patch(ctx, virtualMachine); err != nil {
+			reterr = kerrors.NewAggregate([]error{reterr, err})
+		}
+
+		// NOTE: Patch on conditionsTracker will only keep track of the provisioning process of the fake node, etcd, api server, etc.
+		if err := inmemoryClient.Update(ctx, conditionsTracker); err != nil {
+			reterr = kerrors.NewAggregate([]error{reterr, err})
+		}
+	}()
+
+	// Handle deleted machines
+	if !vSphereMachine.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(ctx, cluster, machine, virtualMachine, conditionsTracker)
+	}
+
+	// Add finalizer first if not set to avoid the race condition between init and delete.
+	// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
+	if !controllerutil.ContainsFinalizer(virtualMachine, VMFinalizer) {
+		controllerutil.AddFinalizer(virtualMachine, VMFinalizer)
+		return ctrl.Result{}, nil
+	}
+
+	// Handle non-deleted machines
+	return r.reconcileNormal(ctx, cluster, machine, virtualMachine, conditionsTracker)
+}
+
+func (r *VirtualMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
+	ipReconciler := r.getVMIpReconciler(cluster, virtualMachine)
+	if ret, err := ipReconciler.ReconcileIP(ctx); !ret.IsZero() || err != nil {
+		return ret, err
+	}
+
+	bootstrapReconciler := r.getVMBootstrapReconciler(virtualMachine)
+	if ret, err := bootstrapReconciler.reconcileBoostrap(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil {
+		return ret, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *VirtualMachineReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
+	bootstrapReconciler := r.getVMBootstrapReconciler(virtualMachine)
+	if ret, err := bootstrapReconciler.reconcileDelete(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil {
+		return ret, err
+	}
+
+	controllerutil.RemoveFinalizer(virtualMachine, VMFinalizer)
+	return ctrl.Result{}, nil
+}
+
+func (r *VirtualMachineReconciler) getVMIpReconciler(cluster *clusterv1.Cluster, virtualMachine *vmoprv1.VirtualMachine) *vmIPReconciler {
+	return &vmIPReconciler{
+		Client:            r.Client,
+		EnableKeepAlive:   r.EnableKeepAlive,
+		KeepAliveDuration: r.KeepAliveDuration,
+
+		// Type specific functions; those functions wrap the differences between govmomi and supervisor types,
+		// thus allowing us to use the same vmIPReconciler in both scenarios.
+		GetVCenterSession: func(ctx context.Context) (*session.Session, error) {
+			// Return a connection to the vCenter where the virtualMachine is hosted.
+			return r.getVCenterSession(ctx)
+		},
+		IsVMWaitingforIP: func() bool {
+			// A virtualMachine is waiting for an IP when PoweredOn but without an IP.
+			return virtualMachine.Status.PowerState == vmoprv1.VirtualMachinePoweredOn && virtualMachine.Status.VmIp == ""
+		},
+		GetVMPath: func() string {
+			// The vm-operator always creates VMs under a sub-folder named after the cluster.
+			datacenter := 0
+			return vcsimVMPath(datacenter, path.Join(cluster.Name, virtualMachine.Name))
+		},
+	}
+}
+
+func (r *VirtualMachineReconciler) getVMBootstrapReconciler(virtualMachine *vmoprv1.VirtualMachine) *vmBootstrapReconciler {
+	return &vmBootstrapReconciler{
+		Client:          r.Client,
+		InMemoryManager: r.InMemoryManager,
+		APIServerMux:    r.APIServerMux,
+
+		// Type specific functions; those functions wrap the differences between govmomi and supervisor types,
+		// thus allowing us to use the same vmBootstrapReconciler in both scenarios.
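+		// NOTE: the govmomi flavour (see the VSphereVM controller in this package) is expected
+		// to build the same vmBootstrapReconciler with its own closures; only the closure
+		// bodies differ, i.e. how readiness and the provider ID are read from the flavour
+		// specific API type.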
+		IsVMReady: func() bool {
+			// A virtualMachine is ready to provision the fake objects hosted on it when it is PoweredOn, has a primary
+			// IP assigned, and has BiosUUID set (the bios id is required when provisioning the node to compute the Provider ID).
+			return virtualMachine.Status.PowerState == vmoprv1.VirtualMachinePoweredOn && virtualMachine.Status.VmIp != "" && virtualMachine.Status.BiosUUID != ""
+		},
+		GetProviderID: func() string {
+			// Computes the ProviderID for the node hosted on the virtualMachine.
+			return util.ConvertUUIDToProviderID(virtualMachine.Status.BiosUUID)
+		},
+	}
+}
+
+func (r *VirtualMachineReconciler) getVCenterSession(ctx context.Context) (*session.Session, error) {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      netConfigMapName,
+			Namespace: vmoperator.DefaultNamespace, // This is where tilt deploys the vm-operator
+		},
+	}
+	if err := r.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil {
+		return nil, errors.Wrapf(err, "failed to get vm-operator Secret %s", secret.Name)
+	}
+
+	serverURL := string(secret.Data[netConfigServerURLKey])
+	if serverURL == "" {
+		return nil, errors.Errorf("%s value is missing from the vm-operator Secret %s", netConfigServerURLKey, secret.Name)
+	}
+	datacenter := string(secret.Data[netConfigDatacenterKey])
+	if datacenter == "" {
+		return nil, errors.Errorf("%s value is missing from the vm-operator Secret %s", netConfigDatacenterKey, secret.Name)
+	}
+	username := string(secret.Data[netConfigUsernameKey])
+	if username == "" {
+		return nil, errors.Errorf("%s value is missing from the vm-operator Secret %s", netConfigUsernameKey, secret.Name)
+	}
+	password := string(secret.Data[netConfigPasswordKey])
+	if password == "" {
+		return nil, errors.Errorf("%s value is missing from the vm-operator Secret %s", netConfigPasswordKey, secret.Name)
+	}
+	thumbprint := string(secret.Data[netConfigThumbprintKey])
+	if thumbprint == "" {
+		return nil, errors.Errorf("%s value is missing from the vm-operator Secret %s", netConfigThumbprintKey, secret.Name)
+	}
+
+	params := session.NewParams().
+		WithServer(serverURL).
+		WithDatacenter(datacenter).
+		WithUserInfo(username, password).
+		WithThumbprint(thumbprint).
+		WithFeatures(session.Feature{
+			EnableKeepAlive:   r.EnableKeepAlive,
+			KeepAliveDuration: r.KeepAliveDuration,
+		})
+
+	return session.GetOrCreate(ctx, params)
+}
+
+// SetupWithManager will add watches for this controller.
+func (r *VirtualMachineReconciler) SetupWithManager(_ context.Context, mgr ctrl.Manager, options controller.Options) error {
+	err := ctrl.NewControllerManagedBy(mgr).
+		For(&vmoprv1.VirtualMachine{}).
+		WithOptions(options).
+		Complete(r)
+
+	if err != nil {
+		return errors.Wrap(err, "failed setting up with a controller manager")
+	}
+	return nil
+}
diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go
new file mode 100644
index 0000000000..d0fdea327a
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go
@@ -0,0 +1,276 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" + vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" + inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server" +) + +func Test_Reconcile_VirtualMachine(t *testing.T) { + t.Run("VirtualMachine not yet provisioned should be ignored", func(t *testing.T) { + g := NewWithT(t) + + vsphereCluster := &vmwarev1.VSphereCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: vmwarev1.GroupVersion.String(), + Kind: "VSphereCluster", + Namespace: vsphereCluster.Namespace, + Name: vsphereCluster.Name, + UID: vsphereCluster.UID, + }, + }, + } + + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + } + + vSphereMachine := &vmwarev1.VSphereMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "baz", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: machine.Name, + UID: machine.UID, + }, + }, + }, + } + + virtualMachine := &vmoprv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmwarev1.GroupVersion.String(), + Kind: "VSphereMachine", + Name: vSphereMachine.Name, + UID: vSphereMachine.UID, + }, + }, + Finalizers: []string{ + VMFinalizer, // Adding this to move past the first reconcile + }, + }, + } + + // Controller runtime client + crclient := fake.NewClientBuilder().WithObjects(cluster, vsphereCluster, machine, vSphereMachine, virtualMachine).WithScheme(scheme).Build() + + // Start in memory manager & add a resourceGroup for the cluster + inmemoryMgr := inmemoryruntime.NewManager(cloudScheme) + err := inmemoryMgr.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + + inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) + inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + + r := VirtualMachineReconciler{ + Client: crclient, + InMemoryManager: inmemoryMgr, + } + + // Reconcile + res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: 
types.NamespacedName{ + Namespace: virtualMachine.Namespace, + Name: virtualMachine.Name, + }}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res).To(Equal(ctrl.Result{RequeueAfter: 5 * time.Second})) + + // Check the conditionsTracker is waiting for infrastructure ready + conditionsTracker := &infrav1.VSphereVM{} + err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker) + g.Expect(err).ToNot(HaveOccurred()) + + c := conditions.Get(conditionsTracker, VMProvisionedCondition) + g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(c.Severity).To(Equal(clusterv1.ConditionSeverityInfo)) + g.Expect(c.Reason).To(Equal(WaitingControlPlaneInitializedReason)) + }) + + t.Run("VirtualMachine provisioned gets a node (worker)", func(t *testing.T) { + g := NewWithT(t) + + vsphereCluster := &vmwarev1.VSphereCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: vmwarev1.GroupVersion.String(), + Kind: "VSphereCluster", + Namespace: vsphereCluster.Namespace, + Name: vsphereCluster.Name, + UID: vsphereCluster.UID, + }, + }, + } + + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To("foo"), // this unblocks node provisioning + }, + }, + } + + vSphereMachine := &vmwarev1.VSphereMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: machine.Name, + UID: machine.UID, + }, + }, + }, + } + + virtualMachine := &vmoprv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmwarev1.GroupVersion.String(), + Kind: "VSphereMachine", + Name: vSphereMachine.Name, + UID: vSphereMachine.UID, + }, + }, + Finalizers: []string{ + VMFinalizer, // Adding this to move past the first reconcile + }, + }, + Status: vmoprv1.VirtualMachineStatus{ + // Those values are required to unblock provisioning of node + BiosUUID: "foo", + VmIp: "1.2.3.4", + PowerState: vmoprv1.VirtualMachinePoweredOn, + }, + } + + // Controller runtime client + crclient := fake.NewClientBuilder().WithObjects(cluster, vsphereCluster, machine, vSphereMachine, virtualMachine).WithScheme(scheme).Build() + + // Start in memory manager & add a resourceGroup for the cluster + inmemoryMgr := inmemoryruntime.NewManager(cloudScheme) + err := inmemoryMgr.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + + inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) + inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + + // Start an http server + apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inmemoryMgr, "127.0.0.1", inmemoryserver.CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: inmemoryserver.DefaultMinPort + 200, + MaxPort: inmemoryserver.DefaultMinPort + 299, + DebugPort: inmemoryserver.DefaultDebugPort + 2, + }) + g.Expect(err).ToNot(HaveOccurred()) + + r := VirtualMachineReconciler{ + Client: 
crclient,
+			InMemoryManager: inmemoryMgr,
+			APIServerMux:    apiServerMux,
+		}
+
+		// Reconcile
+		nodeStartupDuration = 0 * time.Second
+
+		res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+			Namespace: virtualMachine.Namespace,
+			Name:      virtualMachine.Name,
+		}})
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(res).To(Equal(ctrl.Result{}))
+
+		// Check the conditionsTracker reports the Node as provisioned
+
+		conditionsTracker := &infrav1.VSphereVM{}
+		err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker)
+		g.Expect(err).ToNot(HaveOccurred())
+
+		c := conditions.Get(conditionsTracker, NodeProvisionedCondition)
+		g.Expect(c.Status).To(Equal(corev1.ConditionTrue))
+	})
+}
diff --git a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go
new file mode 100644
index 0000000000..d3c207b467
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go
@@ -0,0 +1,1008 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"crypto/rsa"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/pkg/errors"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiutil "sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/certs"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/secret"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
+	inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime"
+	inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server"
+)
+
+// TODO: investigate if we can share this code with the CAPI in memory provider.
+
+const (
+	// VMFinalizer allows this reconciler to clean up resources before removing the
+	// VM object from the API Server.
+	VMFinalizer = "vcsim.fake.infrastructure.cluster.x-k8s.io"
+)
+
+const (
+	// VMProvisionedCondition documents the status of VM provisioning,
+	// which includes the VM being provisioned and with a bootstrap secret available.
+	VMProvisionedCondition clusterv1.ConditionType = "VMProvisioned"
+
+	// WaitingForVMInfrastructureReason (Severity=Info) documents provisioning waiting for the VM
+	// infrastructure to be ready.
+	WaitingForVMInfrastructureReason = "WaitingForVMInfrastructure"
+
+	// WaitingControlPlaneInitializedReason (Severity=Info) documents provisioning waiting
+	// for the control plane to be initialized.
+	WaitingControlPlaneInitializedReason = "WaitingControlPlaneInitialized"
+
+	// WaitingForBootstrapDataReason (Severity=Info) documents provisioning waiting for the bootstrap
+	// data to be ready before starting to create the CloudMachine/VM.
+	WaitingForBootstrapDataReason = "WaitingForBootstrapData"
+)
+
+var ( // TODO: make this configurable
+	nodeStartupDuration = 10 * time.Second
+	nodeStartupJitter   = 0.3
+)
+
+const (
+	// NodeProvisionedCondition documents the status of the provisioning of the Kubernetes node.
+	NodeProvisionedCondition clusterv1.ConditionType = "NodeProvisioned"
+
+	// NodeWaitingForStartupTimeoutReason (Severity=Info) documents the Kubernetes Node provisioning.
+	NodeWaitingForStartupTimeoutReason = "WaitingForStartupTimeout"
+)
+
+var ( // TODO: make this configurable
+	etcdStartupDuration = 10 * time.Second
+	etcdStartupJitter   = 0.3
+)
+
+const (
+	// EtcdProvisionedCondition documents the status of the provisioning of the etcd member.
+	EtcdProvisionedCondition clusterv1.ConditionType = "EtcdProvisioned"
+
+	// EtcdWaitingForStartupTimeoutReason (Severity=Info) documents the etcd pod provisioning.
+	EtcdWaitingForStartupTimeoutReason = "WaitingForStartupTimeout"
+)
+
+var ( // TODO: make this configurable
+	apiServerStartupDuration = 10 * time.Second
+	apiServerStartupJitter   = 0.3
+)
+
+const (
+	// APIServerProvisionedCondition documents the status of the provisioning of the APIServer instance.
+	APIServerProvisionedCondition clusterv1.ConditionType = "APIServerProvisioned"
+
+	// APIServerWaitingForStartupTimeoutReason (Severity=Info) documents the API server pod provisioning.
+	APIServerWaitingForStartupTimeoutReason = "WaitingForStartupTimeout"
+)
+
+// defines annotations to be applied to in memory etcd pods in order to track etcd cluster
+// info belonging to the etcd member each pod represents.
+const (
+	// EtcdClusterIDAnnotationName defines the name of the annotation applied to in memory etcd
+	// pods to track the cluster ID of the etcd member each pod represents.
+	EtcdClusterIDAnnotationName = "etcd.inmemory.infrastructure.cluster.x-k8s.io/cluster-id"
+
+	// EtcdMemberIDAnnotationName defines the name of the annotation applied to in memory etcd
+	// pods to track the member ID of the etcd member each pod represents.
+	EtcdMemberIDAnnotationName = "etcd.inmemory.infrastructure.cluster.x-k8s.io/member-id"
+
+	// EtcdLeaderFromAnnotationName defines the name of the annotation applied to in memory etcd
+	// pods to track the leadership status of the etcd member each pod represents.
+	// Note: We are tracking the time since an etcd member became leader; if more than one pod has this
+	// annotation, the last etcd member that became leader is the current leader.
+	// Using this mechanism, leadership can be forwarded to another pod with an atomic operation
+	// (add/update of the annotation on the pod/etcd member we are forwarding leadership to).
+	EtcdLeaderFromAnnotationName = "etcd.inmemory.infrastructure.cluster.x-k8s.io/leader-from"
+
+	// EtcdMemberRemoved is added to etcd pods which have been removed from the etcd cluster.
+	EtcdMemberRemoved = "etcd.inmemory.infrastructure.cluster.x-k8s.io/member-removed"
+)
+
+type ConditionsTracker interface {
+	client.Object
+	conditions.Getter
+	conditions.Setter
+}
+
+type vmBootstrapReconciler struct {
+	Client          client.Client
+	InMemoryManager inmemoryruntime.Manager
+	APIServerMux    *inmemoryserver.WorkloadClustersMux
+
+	IsVMReady     func() bool
+	GetProviderID func() string
+}
+
+func (r *vmBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	if !conditions.Has(conditionsTracker, VMProvisionedCondition) {
+		conditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingForVMInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
+	}
+
+	// Make sure bootstrap data is available and populated.
+	// NOTE: we are not using bootstrap data, but we wait for it in order to simulate a real machine provisioning workflow.
+	if machine.Spec.Bootstrap.DataSecretName == nil {
+		if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
+			conditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingControlPlaneInitializedReason, clusterv1.ConditionSeverityInfo, "")
+			log.Info("Waiting for the control plane to be initialized")
+			return reconcile.Result{RequeueAfter: 5 * time.Second}, nil // keep requeueing since we don't have a watch on machines // TODO: check if we can avoid this
+		}
+
+		conditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
+		log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
+		return reconcile.Result{RequeueAfter: 5 * time.Second}, nil // keep requeueing since we don't have a watch on machines // TODO: check if we can avoid this
+	}
+
+	// Check if the infrastructure is ready and the Bios UUID is set (required for computing the Provider ID);
+	// otherwise return and wait for the VM object to be updated.
+	if !r.IsVMReady() {
+		log.Info("Waiting for machine infrastructure to become ready")
+		return reconcile.Result{}, nil // TODO: check if we can avoid this
+	}
+	if !conditions.IsTrue(conditionsTracker, VMProvisionedCondition) {
+		conditions.MarkTrue(conditionsTracker, VMProvisionedCondition)
+	}
+
+	// Call the inner reconciliation methods.
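+	// NOTE: each phase below returns a (ctrl.Result, error) pair; the results are merged with
+	// capiutil.LowestNonZeroResult, so the shortest non-zero RequeueAfter across the phases wins.
+	// A minimal illustration of how that merge behaves (values are made up):
+	//
+	//	a := ctrl.Result{RequeueAfter: 10 * time.Second}
+	//	b := ctrl.Result{RequeueAfter: 5 * time.Second}
+	//	capiutil.LowestNonZeroResult(a, b) // ctrl.Result{RequeueAfter: 5 * time.Second}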
+ phases := []func(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error){ + r.reconcileBoostrapNode, + r.reconcileBoostrapETCD, + r.reconcileBoostrapAPIServer, + r.reconcileBoostrapScheduler, + r.reconcileBoostrapControllerManager, + r.reconcileBoostrapKubeadmObjects, + r.reconcileBoostrapKubeProxy, + r.reconcileBoostrapCoredns, + } + + res := ctrl.Result{} + errs := make([]error, 0) + for _, phase := range phases { + phaseResult, err := phase(ctx, cluster, machine, conditionsTracker) + if err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { + continue + } + res = capiutil.LowestNonZeroResult(res, phaseResult) + } + return res, kerrors.NewAggregate(errs) +} + +func (r *vmBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + nodeName := conditionsTracker.GetName() + + provisioningDuration := nodeStartupDuration + provisioningDuration += time.Duration(rand.Float64() * nodeStartupJitter * float64(provisioningDuration)) //nolint:gosec // Intentionally using a weak random number generator here. + + start := conditions.Get(conditionsTracker, VMProvisionedCondition).LastTransitionTime + now := time.Now() + if now.Before(start.Add(provisioningDuration)) { + conditions.MarkFalse(conditionsTracker, NodeProvisionedCondition, NodeWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + remainingTime := start.Add(provisioningDuration).Sub(now) + log.Info("Waiting for Node to start", "Start", start, "Duration", provisioningDuration, "RemainingTime", remainingTime, "Node", nodeName) + return ctrl.Result{RequeueAfter: remainingTime}, nil + } + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + // Create Node + // TODO: consider if to handle an additional setting adding a delay in between create node and node ready/provider ID being set + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + Spec: corev1.NodeSpec{ + ProviderID: r.GetProviderID(), + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + if util.IsControlPlaneMachine(machine) { + if node.Labels == nil { + node.Labels = map[string]string{} + } + node.Labels["node-role.kubernetes.io/control-plane"] = "" + } + + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(node), node); err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to get node") + } + + // NOTE: for the first control plane machine we might create the node before etcd and API server pod are running + // but this is not an issue, because it won't be visible to CAPI until the API server start serving requests. 
+ if err := inmemoryClient.Create(ctx, node); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create Node") + } + } + + conditions.MarkTrue(conditionsTracker, NodeProvisionedCondition) + log.Info("Node created", "Node", klog.KObj(node)) + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + etcdMember := fmt.Sprintf("etcd-%s", conditionsTracker.GetName()) + + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // No-op if the Node is not provisioned yet + if !conditions.IsTrue(conditionsTracker, NodeProvisionedCondition) { + return ctrl.Result{}, nil + } + + // Wait for the etcd pod to start up; etcd pod start happens a configurable time after the Node is provisioned. + provisioningDuration := etcdStartupDuration + provisioningDuration += time.Duration(rand.Float64() * etcdStartupJitter * float64(provisioningDuration)) //nolint:gosec // Intentionally using a weak random number generator here. + + start := conditions.Get(conditionsTracker, NodeProvisionedCondition).LastTransitionTime + now := time.Now() + if now.Before(start.Add(provisioningDuration)) { + conditions.MarkFalse(conditionsTracker, EtcdProvisionedCondition, EtcdWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + remainingTime := start.Add(provisioningDuration).Sub(now) + log.Info("Waiting for etcd Pod to start", "Start", start, "Duration", provisioningDuration, "RemainingTime", remainingTime, "Pod", klog.KRef(metav1.NamespaceSystem, etcdMember)) + return ctrl.Result{RequeueAfter: remainingTime}, nil + } + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + // Create the etcd pod + // TODO: consider if to handle an additional setting adding a delay in between create pod and pod ready + etcdPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: etcdMember, + Labels: map[string]string{ + "component": "etcd", + "tier": "control-plane", + }, + }, + Spec: corev1.PodSpec{ + NodeName: conditionsTracker.GetName(), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(etcdPod), etcdPod); err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to get etcd Pod") + } + + // Gets info about the current etcd cluster, if any. + info, err := r.getEtcdInfo(ctx, inmemoryClient) + if err != nil { + return ctrl.Result{}, err + } + + // If this is the first etcd member in the cluster, assign a cluster ID + if info.clusterID == "" { + for { + info.clusterID = fmt.Sprintf("%d", rand.Uint32()) //nolint:gosec // weak random number generator is good enough here + if info.clusterID != "0" { + break + } + } + } + + // Computes a unique memberID. 
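+		// The loop below picks a random non-zero uint32 not already taken by another member.
+		// Together with the cluster ID and (for the first member) the leader-from timestamp set
+		// further down, a pod typically ends up annotated like this (illustrative values):
+		//
+		//	etcd.inmemory.infrastructure.cluster.x-k8s.io/cluster-id:  "4026532121"
+		//	etcd.inmemory.infrastructure.cluster.x-k8s.io/member-id:   "2997213912"
+		//	etcd.inmemory.infrastructure.cluster.x-k8s.io/leader-from: "2024-01-25T16:18:51Z"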
+ var memberID string + for { + memberID = fmt.Sprintf("%d", rand.Uint32()) //nolint:gosec // weak random number generator is good enough here + if !info.members.Has(memberID) && memberID != "0" { + break + } + } + + // Annotate the pod with the info about the etcd cluster. + etcdPod.Annotations = map[string]string{ + EtcdClusterIDAnnotationName: info.clusterID, + EtcdMemberIDAnnotationName: memberID, + } + + // If the etcd cluster is being created it doesn't have a leader yet, so set this member as a leader. + if info.leaderID == "" { + etcdPod.Annotations[EtcdLeaderFromAnnotationName] = time.Now().Format(time.RFC3339) + } + + // NOTE: for the first control plane machine we might create the etcd pod before the API server pod is running + // but this is not an issue, because it won't be visible to CAPI until the API server start serving requests. + if err := inmemoryClient.Create(ctx, etcdPod); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create Pod") + } + } + + // If there is not yet an etcd member listener for this machine, add it to the server. + if !r.APIServerMux.HasEtcdMember(resourceGroup, etcdMember) { + // Getting the etcd CA + s, err := secret.Get(ctx, r.Client, client.ObjectKeyFromObject(cluster), secret.EtcdCA) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get etcd CA") + } + certData, exists := s.Data[secret.TLSCrtDataName] + if !exists { + return ctrl.Result{}, errors.Errorf("invalid etcd CA: missing data for %s", secret.TLSCrtDataName) + } + + cert, err := certs.DecodeCertPEM(certData) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "invalid etcd CA: invalid %s", secret.TLSCrtDataName) + } + + keyData, exists := s.Data[secret.TLSKeyDataName] + if !exists { + return ctrl.Result{}, errors.Errorf("invalid etcd CA: missing data for %s", secret.TLSKeyDataName) + } + + key, err := certs.DecodePrivateKeyPEM(keyData) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "invalid etcd CA: invalid %s", secret.TLSKeyDataName) + } + + if err := r.APIServerMux.AddEtcdMember(resourceGroup, etcdMember, cert, key.(*rsa.PrivateKey)); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to start etcd member") + } + } + + conditions.MarkTrue(conditionsTracker, EtcdProvisionedCondition) + log.Info("etcd Pod started", "Pod", klog.KObj(etcdPod)) + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + apiServer := fmt.Sprintf("kube-apiserver-%s", conditionsTracker.GetName()) + + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // No-op if the Node is not provisioned yet + if !conditions.IsTrue(conditionsTracker, NodeProvisionedCondition) { + return ctrl.Result{}, nil + } + + // Wait for the API server pod to start up; API server pod start happens a configurable time after the Node is provisioned. + provisioningDuration := apiServerStartupDuration + provisioningDuration += time.Duration(rand.Float64() * apiServerStartupJitter * float64(provisioningDuration)) //nolint:gosec // Intentionally using a weak random number generator here. 
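+	// For example, with apiServerStartupDuration = 10s and apiServerStartupJitter = 0.3, the
+	// resulting provisioningDuration is uniformly distributed in [10s, 13s); the same jitter
+	// scheme is used above for the Node and etcd startup simulation.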
+ + start := conditions.Get(conditionsTracker, NodeProvisionedCondition).LastTransitionTime + now := time.Now() + if now.Before(start.Add(provisioningDuration)) { + conditions.MarkFalse(conditionsTracker, APIServerProvisionedCondition, APIServerWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + remainingTime := start.Add(provisioningDuration).Sub(now) + log.Info("Waiting for API server Pod to start", "Start", start, "Duration", provisioningDuration, "RemainingTime", remainingTime, "Pod", klog.KRef(metav1.NamespaceSystem, apiServer)) + return ctrl.Result{RequeueAfter: remainingTime}, nil + } + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + // Create the apiserver pod + // TODO: consider if to handle an additional setting adding a delay in between create pod and pod ready + + apiServerPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: apiServer, + Labels: map[string]string{ + "component": "kube-apiserver", + "tier": "control-plane", + }, + }, + Spec: corev1.PodSpec{ + NodeName: conditionsTracker.GetName(), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(apiServerPod), apiServerPod); err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to get apiServer Pod") + } + + if err := inmemoryClient.Create(ctx, apiServerPod); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create apiServer Pod") + } + } + + // If there is not yet an API server listener for this machine. + if !r.APIServerMux.HasAPIServer(resourceGroup, apiServer) { + // Getting the Kubernetes CA + s, err := secret.Get(ctx, r.Client, client.ObjectKeyFromObject(cluster), secret.ClusterCA) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get cluster CA") + } + certData, exists := s.Data[secret.TLSCrtDataName] + if !exists { + return ctrl.Result{}, errors.Errorf("invalid cluster CA: missing data for %s", secret.TLSCrtDataName) + } + + cert, err := certs.DecodeCertPEM(certData) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "invalid cluster CA: invalid %s", secret.TLSCrtDataName) + } + + keyData, exists := s.Data[secret.TLSKeyDataName] + if !exists { + return ctrl.Result{}, errors.Errorf("invalid cluster CA: missing data for %s", secret.TLSKeyDataName) + } + + key, err := certs.DecodePrivateKeyPEM(keyData) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "invalid cluster CA: invalid %s", secret.TLSKeyDataName) + } + + // Adding the APIServer. + // NOTE: When the first APIServer is added, the workload cluster listener is started. 
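+		// Roughly: the mux serves all the fake API servers of a workload cluster from a single
+		// host/port pair (the port is taken from the [MinPort, MaxPort] range the mux was
+		// configured with), and that address is what the cluster's control plane endpoint
+		// resolves to, so clients using the generated kubeconfig land on this fake API server.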
+ if err := r.APIServerMux.AddAPIServer(resourceGroup, apiServer, cert, key.(*rsa.PrivateKey)); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to start API server") + } + } + + conditions.MarkTrue(conditionsTracker, APIServerProvisionedCondition) + log.Info("API server Pod started", "Pod", klog.KObj(apiServerPod)) + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // NOTE: we are creating the scheduler pod to make KCP happy, but we are not implementing any + // specific behaviour for this component because they are not relevant for stress tests. + // As a current approximation, we create the scheduler as soon as the API server is provisioned; + // also, the scheduler is immediately marked as ready. + if !conditions.IsTrue(conditionsTracker, APIServerProvisionedCondition) { + return ctrl.Result{}, nil + } + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + schedulerPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: fmt.Sprintf("kube-scheduler-%s", conditionsTracker.GetName()), + Labels: map[string]string{ + "component": "kube-scheduler", + "tier": "control-plane", + }, + }, + Spec: corev1.PodSpec{ + NodeName: conditionsTracker.GetName(), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + if err := inmemoryClient.Create(ctx, schedulerPod); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create scheduler Pod") + } + + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // NOTE: we are creating the controller manager pod to make KCP happy, but we are not implementing any + // specific behaviour for this component because they are not relevant for stress tests. + // As a current approximation, we create the controller manager as soon as the API server is provisioned; + // also, the controller manager is immediately marked as ready. + if !conditions.IsTrue(conditionsTracker, APIServerProvisionedCondition) { + return ctrl.Result{}, nil + } + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
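+	// klog.KObj(cluster).String() renders as "<namespace>/<name>"; e.g. a Cluster "bar" in
+	// namespace "foo" maps to the resource group (and listener name) "foo/bar".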
+ resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + controllerManagerPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: fmt.Sprintf("kube-controller-manager-%s", conditionsTracker.GetName()), + Labels: map[string]string{ + "component": "kube-controller-manager", + "tier": "control-plane", + }, + }, + Spec: corev1.PodSpec{ + NodeName: conditionsTracker.GetName(), + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + if err := inmemoryClient.Create(ctx, controllerManagerPod); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create controller manager Pod") + } + + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, _ ConditionsTracker) (ctrl.Result, error) { + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + // create kubeadm ClusterRole and ClusterRoleBinding enforced by KCP + // NOTE: we create those objects because this is what kubeadm does, but KCP creates + // ClusterRole and ClusterRoleBinding if not found. + + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadm:get-nodes", + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{""}, + Resources: []string{"nodes"}, + }, + }, + } + if err := inmemoryClient.Create(ctx, role); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create kubeadm:get-nodes ClusterRole") + } + + roleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadm:get-nodes", + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "kubeadm:get-nodes", + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.GroupKind, + Name: "system:bootstrappers:kubeadm:default-node-token", + }, + }, + } + if err := inmemoryClient.Create(ctx, roleBinding); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create kubeadm:get-nodes ClusterRoleBinding") + } + + // create kubeadm config map + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadm-config", + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + "ClusterConfiguration": "", + }, + } + if err := inmemoryClient.Create(ctx, cm); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create kubeadm-config ConfigMap") + } + + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, _ ConditionsTracker) (ctrl.Result, error) { + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // TODO: Add provisioning time for KubeProxy. + + // Compute the resource group unique name. 
+ // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + // Create the kube-proxy-daemonset + kubeProxyDaemonSet := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: "kube-proxy", + Labels: map[string]string{ + "component": "kube-proxy", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "kube-proxy", + Image: fmt.Sprintf("registry.k8s.io/kube-proxy:%s", *machine.Spec.Version), + }, + }, + }, + }, + }, + } + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(kubeProxyDaemonSet), kubeProxyDaemonSet); err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to get kube-proxy DaemonSet") + } + + if err := inmemoryClient.Create(ctx, kubeProxyDaemonSet); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create kube-proxy DaemonSet") + } + } + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, _ ConditionsTracker) (ctrl.Result, error) { + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // TODO: Add provisioning time for CoreDNS. + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + // Create the coredns configMap. + corednsConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: "coredns", + }, + Data: map[string]string{ + "Corefile": "ANG", + }, + } + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(corednsConfigMap), corednsConfigMap); err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to get coreDNS configMap") + } + + if err := inmemoryClient.Create(ctx, corednsConfigMap); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create coreDNS configMap") + } + } + // Create the coredns deployment. 
+ corednsDeployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceSystem, + Name: "coredns", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "coredns", + Image: "registry.k8s.io/coredns/coredns:v1.10.1", + }, + }, + }, + }, + }, + } + + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(corednsDeployment), corednsDeployment); err != nil { + if !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to get coreDNS deployment") + } + + if err := inmemoryClient.Create(ctx, corednsDeployment); err != nil && !apierrors.IsAlreadyExists(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to create coreDNS deployment") + } + } + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + // Call the inner reconciliation methods. + phases := []func(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error){ + r.reconcileDeleteNode, + r.reconcileDeleteETCD, + r.reconcileDeleteAPIServer, + r.reconcileDeleteScheduler, + r.reconcileDeleteControllerManager, + // Note: We are not deleting kubeadm objects because they exist in K8s, they are not related to a specific machine. + } + + res := ctrl.Result{} + errs := make([]error, 0) + for _, phase := range phases { + phaseResult, err := phase(ctx, cluster, machine, conditionsTracker) + if err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { + continue + } + res = capiutil.LowestNonZeroResult(res, phaseResult) + } + if res.IsZero() && len(errs) == 0 { + controllerutil.RemoveFinalizer(conditionsTracker, infrav1.VMFinalizer) + } + return res, kerrors.NewAggregate(errs) +} + +func (r *vmBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster *clusterv1.Cluster, _ *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. + resourceGroup := klog.KObj(cluster).String() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() + + // Delete Node + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: conditionsTracker.GetName(), + }, + } + + // TODO(killianmuldoon): check if we can drop this given that the MachineController is already draining pods and deleting nodes. + if err := inmemoryClient.Delete(ctx, node); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{}, errors.Wrapf(err, "failed to delete Node") + } + + return ctrl.Result{}, nil +} + +func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { + // No-op if the machine is not a control plane machine. + if !util.IsControlPlaneMachine(machine) { + return ctrl.Result{}, nil + } + + // Compute the resource group unique name. + // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
+	resourceGroup := klog.KObj(cluster).String()
+	inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient()
+
+	etcdMember := fmt.Sprintf("etcd-%s", conditionsTracker.GetName())
+	etcdPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: metav1.NamespaceSystem,
+			Name:      etcdMember,
+		},
+	}
+	if err := inmemoryClient.Delete(ctx, etcdPod); err != nil && !apierrors.IsNotFound(err) {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to delete etcd Pod")
+	}
+	if err := r.APIServerMux.DeleteEtcdMember(resourceGroup, etcdMember); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// TODO: if all the etcd members are gone, cleanup all the k8s objects from the resource group.
+	// note: it is not possible to delete the resource group, because cloud resources should be preserved.
+	// given that, in order to implement this it is required to find a way to identify all the k8s resources (might be via gvk);
+	// also, deletion must happen immediately, without respecting finalizers or owner reference links.
+
+	return ctrl.Result{}, nil
+}
+
+func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) {
+	// No-op if the machine is not a control plane machine.
+	if !util.IsControlPlaneMachine(machine) {
+		return ctrl.Result{}, nil
+	}
+
+	// Compute the resource group unique name.
+	// NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity.
+	resourceGroup := klog.KObj(cluster).String()
+	inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient()
+
+	apiServer := fmt.Sprintf("kube-apiserver-%s", conditionsTracker.GetName())
+	apiServerPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: metav1.NamespaceSystem,
+			Name:      apiServer,
+		},
+	}
+	if err := inmemoryClient.Delete(ctx, apiServerPod); err != nil && !apierrors.IsNotFound(err) {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to delete apiServer Pod")
+	}
+	if err := r.APIServerMux.DeleteAPIServer(resourceGroup, apiServer); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *vmBootstrapReconciler) reconcileDeleteScheduler(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) {
+	// No-op if the machine is not a control plane machine.
+	if !util.IsControlPlaneMachine(machine) {
+		return ctrl.Result{}, nil
+	}
+
+	// Compute the resource group unique name.
+	// NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity.
+	resourceGroup := klog.KObj(cluster).String()
+	inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient()
+
+	schedulerPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: metav1.NamespaceSystem,
+			Name:      fmt.Sprintf("kube-scheduler-%s", conditionsTracker.GetName()),
+		},
+	}
+	if err := inmemoryClient.Delete(ctx, schedulerPod); err != nil && !apierrors.IsNotFound(err) {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to delete scheduler Pod")
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *vmBootstrapReconciler) reconcileDeleteControllerManager(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) {
+	// No-op if the machine is not a control plane machine.
+	if !util.IsControlPlaneMachine(machine) {
+		return ctrl.Result{}, nil
+	}
+
+	// Compute the resource group unique name.
+	// NOTE: We are using the resource group name also as the name for the listener for the sake of simplicity.
+	resourceGroup := klog.KObj(cluster).String()
+	inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient()
+
+	controllerManagerPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: metav1.NamespaceSystem,
+			Name:      fmt.Sprintf("kube-controller-manager-%s", conditionsTracker.GetName()),
+		},
+	}
+	if err := inmemoryClient.Delete(ctx, controllerManagerPod); err != nil && !apierrors.IsNotFound(err) {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to delete controller manager Pod")
+	}
+
+	return ctrl.Result{}, nil
+}
+
+type etcdInfo struct {
+	clusterID string
+	leaderID  string
+	members   sets.Set[string]
+}
+
+func (r *vmBootstrapReconciler) getEtcdInfo(ctx context.Context, inmemoryClient inmemoryruntime.Client) (etcdInfo, error) {
+	etcdPods := &corev1.PodList{}
+	if err := inmemoryClient.List(ctx, etcdPods,
+		client.InNamespace(metav1.NamespaceSystem),
+		client.MatchingLabels{
+			"component": "etcd",
+			"tier":      "control-plane"},
+	); err != nil {
+		return etcdInfo{}, errors.Wrap(err, "failed to list etcd members")
+	}
+
+	if len(etcdPods.Items) == 0 {
+		return etcdInfo{}, nil
+	}
+
+	info := etcdInfo{
+		members: sets.New[string](),
+	}
+	var leaderFrom time.Time
+	for _, pod := range etcdPods.Items {
+		if _, ok := pod.Annotations[EtcdMemberRemoved]; ok {
+			continue
+		}
+		if info.clusterID == "" {
+			info.clusterID = pod.Annotations[EtcdClusterIDAnnotationName]
+		} else if pod.Annotations[EtcdClusterIDAnnotationName] != info.clusterID {
+			return etcdInfo{}, errors.New("invalid etcd cluster, members have different cluster ID")
+		}
+		memberID := pod.Annotations[EtcdMemberIDAnnotationName]
+		info.members.Insert(memberID)
+
+		if t, err := time.Parse(time.RFC3339, pod.Annotations[EtcdLeaderFromAnnotationName]); err == nil {
+			if t.After(leaderFrom) {
+				info.leaderID = memberID
+				leaderFrom = t
+			}
+		}
+	}
+
+	if info.leaderID == "" {
+		// TODO: consider if and how to automatically recover from this case.
+		// Note: this can also happen when reading etcd members in the server; it might be something we have to take care of before deletion...
+		// For now it should not be an issue, because KCP forwards etcd leadership before deletion.
+		return etcdInfo{}, errors.New("invalid etcd cluster, no leader found")
+	}
+
+	return info, nil
+}
diff --git a/test/infrastructure/vcsim/controllers/vmip_controller.go b/test/infrastructure/vcsim/controllers/vmip_controller.go
new file mode 100644
index 0000000000..7a3556d3e5
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/vmip_controller.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/vmware/govmomi/vim25/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	govmominet "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/net"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
+)
+
+type vmIPReconciler struct {
+	Client            client.Client
+	EnableKeepAlive   bool
+	KeepAliveDuration time.Duration
+
+	IsVMWaitingforIP  func() bool
+	GetVCenterSession func(ctx context.Context) (*session.Session, error)
+	GetVMPath         func() string
+}
+
+func (r *vmIPReconciler) ReconcileIP(ctx context.Context) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// No-op if the VM is still provisioning or it already has an IP.
+	if !r.IsVMWaitingforIP() {
+		return reconcile.Result{}, nil
+	}
+
+	// Otherwise the VM is stuck waiting for an IP (because there is no DHCP service in vcsim); assign a fake IP to unblock it.
+
+	authSession, err := r.GetVCenterSession(ctx)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to get vcenter session")
+	}
+
+	vm, err := authSession.Finder.VirtualMachine(ctx, r.GetVMPath())
+	if err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to find vm")
+	}
+
+	// Check if the VM already has a network status (but it is not yet surfaced in conditions)
+	netStatus, err := govmominet.GetNetworkStatus(ctx, authSession.Client.Client, vm.Reference())
+	if err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to get vm network status")
+	}
+	ipAddrs := []string{}
+	for _, s := range netStatus {
+		ipAddrs = append(ipAddrs, s.IPAddrs...)
+	}
+	if len(ipAddrs) > 0 {
+		// No-op, the VM already has an IP; we should just wait for it to surface in the K8s VirtualMachine/VSphereVM
+		return reconcile.Result{}, nil
+	}
+
+	log.Info("Powering Off the VM before applying an IP")
+	task, err := vm.PowerOff(ctx)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to PowerOff vm")
+	}
+	if err = task.Wait(ctx); err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to wait for PowerOff vm task to complete")
+	}
+
+	// Add a fake IP address.
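+	// The fixed IP is applied through a guest customization spec, the same mechanism a real
+	// vCenter uses to configure a guest OS; customization can only be applied while the VM is
+	// powered off, which is why the VM is power cycled around the Customize call below.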
+	spec := types.CustomizationSpec{
+		NicSettingMap: []types.CustomizationAdapterMapping{
+			{
+				Adapter: types.CustomizationIPSettings{
+					Ip: &types.CustomizationFixedIp{
+						IpAddress: "192.168.1.100",
+					},
+					SubnetMask:    "255.255.255.0",
+					Gateway:       []string{"192.168.1.1"},
+					DnsServerList: []string{"192.168.1.1"},
+					DnsDomain:     "ad.domain",
+				},
+			},
+		},
+		Identity: &types.CustomizationLinuxPrep{
+			HostName: &types.CustomizationFixedName{
+				Name: "hostname",
+			},
+			Domain:     "ad.domain",
+			TimeZone:   "Etc/UTC",
+			HwClockUTC: types.NewBool(true),
+		},
+		GlobalIPSettings: types.CustomizationGlobalIPSettings{
+			DnsSuffixList: []string{"ad.domain"},
+			DnsServerList: []string{"192.168.1.1"},
+		},
+	}
+
+	log.Info("Customizing the VM to apply the IP")
+	task, err = vm.Customize(ctx, spec)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to Customize vm")
+	}
+	if err = task.Wait(ctx); err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to wait for Customize vm task to complete")
+	}
+
+	log.Info("Powering On the VM after applying the IP")
+	task, err = vm.PowerOn(ctx)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to PowerOn vm")
+	}
+	if err = task.Wait(ctx); err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to wait for PowerOn vm task to complete")
+	}
+
+	ip, err := vm.WaitForIP(ctx)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrapf(err, "failed to WaitForIP")
+	}
+	log.Info("IP assigned to the VM", "ip", ip)
+
+	return reconcile.Result{}, nil
+}
diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller.go b/test/infrastructure/vcsim/controllers/vspherevm_controller.go
new file mode 100644
index 0000000000..3a5d53b3b1
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/vspherevm_controller.go
@@ -0,0 +1,305 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"time"
+
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	capiutil "sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/predicates"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
+	inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime"
+	inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server"
+)
+
+// TODO: implement support for CAPV deployed in arbitrary ns (TBD if we need this).
+const capvNamespace = "capv-system"
+
+type VSphereVMReconciler struct {
+	Client            client.Client
+	InMemoryManager   inmemoryruntime.Manager
+	APIServerMux      *inmemoryserver.WorkloadClustersMux
+	EnableKeepAlive   bool
+	KeepAliveDuration time.Duration
+
+	// WatchFilterValue is the label value used to filter events prior to reconciliation.
+	WatchFilterValue string
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspherevms,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vsphereclusters,verbs=get;list;watch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vsphereclusteridentities,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
+
+// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
+func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// Fetch the VSphereVM instance
+	vSphereVM := &infrav1.VSphereVM{}
+	if err := r.Client.Get(ctx, req.NamespacedName, vSphereVM); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the owner VSphereMachine.
+	vSphereMachine, err := util.GetOwnerVSphereMachine(ctx, r.Client, vSphereVM.ObjectMeta)
+	// vSphereMachine can be nil in cases where a custom mover other than clusterctl
+	// moves the resources without owner references set;
+	// in that case a nil vSphereMachine would cause a panic and CrashLoopBackOff the pod,
+	// preventing vspheremachine_controller from setting the owner reference.
+	if err != nil || vSphereMachine == nil {
+		log.Info("Owner VSphereMachine not found, won't reconcile")
+		return reconcile.Result{}, err
+	}
+	log = log.WithValues("VSphereMachine", klog.KObj(vSphereMachine))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Fetch the Machine.
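+	// NOTE: capiutil.GetOwnerMachine returns a nil Machine (and no error) when the OwnerRef is not set yet; this is handled below.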
+	machine, err := capiutil.GetOwnerMachine(ctx, r.Client, vSphereMachine.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if machine == nil {
+		log.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
+		return ctrl.Result{}, nil
+	}
+	log = log.WithValues("Machine", klog.KObj(machine))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Fetch the Cluster.
+	cluster, err := capiutil.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
+	if err != nil {
+		log.Info("VSphereVM owner Machine is missing cluster label or cluster does not exist")
+		return ctrl.Result{}, err
+	}
+	if cluster == nil {
+		log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: <name of cluster>", clusterv1.ClusterNameLabel))
+		return ctrl.Result{}, nil
+	}
+	log = log.WithValues("Cluster", klog.KObj(cluster))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Return early if the object or Cluster is paused.
+	if annotations.IsPaused(cluster, vSphereVM) {
+		log.Info("Reconciliation is paused for this object")
+		return ctrl.Result{}, nil
+	}
+
+	// Fetch the VSphereCluster.
+	key := client.ObjectKey{
+		Namespace: cluster.Namespace,
+		Name:      cluster.Spec.InfrastructureRef.Name,
+	}
+	vSphereCluster := &infrav1.VSphereCluster{}
+	if err := r.Client.Get(ctx, key, vSphereCluster); err != nil {
+		log.Info("VSphereCluster can't be retrieved")
+		return ctrl.Result{}, err
+	}
+	log = log.WithValues("VSphereCluster", klog.KObj(vSphereCluster))
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	// Compute the resource group unique name.
+	// NOTE: We are using the resource group name also as the name for the listener for the sake of simplicity.
+	resourceGroup := klog.KObj(cluster).String()
+
+	// Check if there is a conditionsTracker in the resource group.
+	// The conditionsTracker is an object stored in memory whose only purpose is to hold the conditions
+	// used for keeping track of the provisioning process of the fake node, etcd, API server, etc.
+	// for this specific VSphereVM (the process managed by this controller).
+	inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient()
+	// NOTE: The type of the in-memory conditionsTracker object doesn't matter as long as it implements Cluster API's conditions interfaces.
+	conditionsTracker := &infrav1.VSphereVM{}
+	if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker); err != nil {
+		if !apierrors.IsNotFound(err) {
+			return ctrl.Result{}, errors.Wrap(err, "failed to get conditionsTracker")
+		}
+
+		conditionsTracker = &infrav1.VSphereVM{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      vSphereVM.Name,
+				Namespace: vSphereVM.Namespace,
+			},
+		}
+		if err := inmemoryClient.Create(ctx, conditionsTracker); err != nil {
+			return ctrl.Result{}, errors.Wrap(err, "failed to create conditionsTracker")
+		}
+	}
+
+	// Initialize the patch helper
+	patchHelper, err := patch.NewHelper(vSphereVM, r.Client)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Always attempt to Patch the VSphereVM + conditionsTracker object and status after each reconciliation.
+	defer func() {
+		// NOTE: Patch on VSphereVM will only add/remove a finalizer.
+		if err := patchHelper.Patch(ctx, vSphereVM); err != nil {
+			reterr = kerrors.NewAggregate([]error{reterr, err})
+		}
+
+		// NOTE: Update on conditionsTracker will only keep track of the provisioning process of the fake node, etcd, API server, etc.
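+		// NOTE: The conditionsTracker lives only in the vcsim controller's in-memory resource group; it is never written to the management cluster.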
+		if err := inmemoryClient.Update(ctx, conditionsTracker); err != nil {
+			reterr = kerrors.NewAggregate([]error{reterr, err})
+		}
+	}()
+
+	// Handle deleted machines
+	if !vSphereMachine.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(ctx, cluster, vSphereCluster, machine, vSphereVM, conditionsTracker)
+	}
+
+	// Add finalizer first if not set to avoid the race condition between init and delete.
+	// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
+	if !controllerutil.ContainsFinalizer(vSphereVM, VMFinalizer) {
+		controllerutil.AddFinalizer(vSphereVM, VMFinalizer)
+		return ctrl.Result{}, nil
+	}
+
+	// Handle non-deleted machines
+	return r.reconcileNormal(ctx, cluster, vSphereCluster, machine, vSphereVM, conditionsTracker)
+}
+
+func (r *VSphereVMReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, vSphereCluster *infrav1.VSphereCluster, machine *clusterv1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
+	ipReconciler := r.getVMIpReconciler(vSphereCluster, vSphereVM)
+	if ret, err := ipReconciler.ReconcileIP(ctx); !ret.IsZero() || err != nil {
+		return ret, err
+	}
+
+	bootstrapReconciler := r.getVMBootstrapReconciler(vSphereVM)
+	if ret, err := bootstrapReconciler.reconcileBoostrap(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil {
+		return ret, err
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *VSphereVMReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, _ *infrav1.VSphereCluster, machine *clusterv1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
+	bootstrapReconciler := r.getVMBootstrapReconciler(vSphereVM)
+	if ret, err := bootstrapReconciler.reconcileDelete(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil {
+		return ret, err
+	}
+
+	controllerutil.RemoveFinalizer(vSphereVM, VMFinalizer)
+	return ctrl.Result{}, nil
+}
+
+func (r *VSphereVMReconciler) getVMIpReconciler(vSphereCluster *infrav1.VSphereCluster, vSphereVM *infrav1.VSphereVM) *vmIPReconciler {
+	return &vmIPReconciler{
+		Client:            r.Client,
+		EnableKeepAlive:   r.EnableKeepAlive,
+		KeepAliveDuration: r.KeepAliveDuration,
+
+		// Type-specific functions; these functions wrap the differences between govmomi and supervisor types,
+		// thus allowing the same vmIPReconciler to be used in both scenarios.
+		GetVCenterSession: func(ctx context.Context) (*session.Session, error) {
+			// Return a connection to the vCenter where the vSphereVM is hosted
+			return r.getVCenterSession(ctx, vSphereCluster, vSphereVM)
+		},
+		IsVMWaitingforIP: func() bool {
+			// A VSphereVM is waiting for an IP when it is not ready and the VMProvisioned condition is false with reason WaitingForIPAllocation
+			return !vSphereVM.Status.Ready && conditions.IsFalse(vSphereVM, infrav1.VMProvisionedCondition) && conditions.GetReason(vSphereVM, infrav1.VMProvisionedCondition) == infrav1.WaitingForIPAllocationReason
+		},
+		GetVMPath: func() string {
+			// Return the path where the VM is stored.
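+			// e.g. a VM with Folder "/DC0/vm" and Name "my-vm" is looked up at "/DC0/vm/my-vm".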
+			return path.Join(vSphereVM.Spec.Folder, vSphereVM.Name)
+		},
+	}
+}
+
+func (r *VSphereVMReconciler) getVMBootstrapReconciler(vSphereVM *infrav1.VSphereVM) *vmBootstrapReconciler {
+	return &vmBootstrapReconciler{
+		Client:          r.Client,
+		InMemoryManager: r.InMemoryManager,
+		APIServerMux:    r.APIServerMux,
+
+		// Type-specific functions; these functions wrap the differences between govmomi and supervisor types,
+		// thus allowing the same vmBootstrapReconciler to be used in both scenarios.
+		IsVMReady: func() bool {
+			// A VSphereVM is ready to provision the fake objects hosted on it when it is ready and BiosUUID is set
+			// (the BIOS UUID is required when provisioning the node to compute the Provider ID).
+			return vSphereVM.Status.Ready && vSphereVM.Spec.BiosUUID != ""
+		},
+		GetProviderID: func() string {
+			// Computes the ProviderID for the node hosted on the vSphereVM
+			return util.ConvertUUIDToProviderID(vSphereVM.Spec.BiosUUID)
+		},
+	}
+}
+
+func (r *VSphereVMReconciler) getVCenterSession(ctx context.Context, vSphereCluster *infrav1.VSphereCluster, vSphereVM *infrav1.VSphereVM) (*session.Session, error) {
+	if vSphereCluster.Spec.IdentityRef == nil {
+		return nil, errors.New("vcsim does not support using credentials provided to the manager")
+	}
+
+	creds, err := identity.GetCredentials(ctx, r.Client, vSphereCluster, capvNamespace)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to retrieve credentials from IdentityRef")
+	}
+
+	params := session.NewParams().
+		WithServer(vSphereVM.Spec.Server).
+		WithDatacenter(vSphereVM.Spec.Datacenter).
+		WithUserInfo(creds.Username, creds.Password).
+		WithThumbprint(vSphereVM.Spec.Thumbprint).
+		WithFeatures(session.Feature{
+			EnableKeepAlive:   r.EnableKeepAlive,
+			KeepAliveDuration: r.KeepAliveDuration,
+		})
+
+	return session.GetOrCreate(ctx, params)
+}
+
+// SetupWithManager will add watches for this controller.
+func (r *VSphereVMReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	err := ctrl.NewControllerManagedBy(mgr).
+		For(&infrav1.VSphereVM{}).
+		WithOptions(options).
+		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+		Complete(r)
+
+	if err != nil {
+		return errors.Wrap(err, "failed setting up with a controller manager")
+	}
+	return nil
+}
diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go
new file mode 100644
index 0000000000..dc0ad00817
--- /dev/null
+++ b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go
@@ -0,0 +1,303 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	.
"github.com/onsi/gomega" + vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime" + inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server" + vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1" +) + +var ( + cloudScheme = runtime.NewScheme() + scheme = runtime.NewScheme() + + ctx = context.Background() +) + +func init() { + // scheme used for operating on the management cluster. + _ = clusterv1.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) + _ = vmwarev1.AddToScheme(scheme) + _ = vmoprv1.AddToScheme(scheme) + _ = vcsimv1.AddToScheme(scheme) + + // scheme used for operating on the cloud resource. + _ = infrav1.AddToScheme(cloudScheme) + _ = corev1.AddToScheme(cloudScheme) + _ = appsv1.AddToScheme(cloudScheme) + _ = rbacv1.AddToScheme(cloudScheme) +} + +func Test_Reconcile_VSphereVM(t *testing.T) { + t.Run("VSphereMachine not yet provisioned should be ignored", func(t *testing.T) { + g := NewWithT(t) + + vsphereCluster := &infrav1.VSphereCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: "VSphereCluster", + Namespace: vsphereCluster.Namespace, + Name: vsphereCluster.Name, + UID: vsphereCluster.UID, + }, + }, + } + + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + } + + vSphereMachine := &infrav1.VSphereMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "baz", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: machine.Name, + UID: machine.UID, + }, + }, + }, + } + + vSphereVM := &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: infrav1.GroupVersion.String(), + Kind: "VSphereMachine", + Name: vSphereMachine.Name, + UID: vSphereMachine.UID, + }, + }, + Finalizers: []string{ + VMFinalizer, // Adding this to move past the first reconcile + }, + }, + } + + // Controller runtime client + crclient := fake.NewClientBuilder().WithObjects(cluster, vsphereCluster, machine, vSphereMachine, vSphereVM).WithScheme(scheme).Build() + + // Start in memory manager & add a resourceGroup for the cluster + inmemoryMgr := inmemoryruntime.NewManager(cloudScheme) + err := inmemoryMgr.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + + inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) 
+ inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + + r := VSphereVMReconciler{ + Client: crclient, + InMemoryManager: inmemoryMgr, + } + + // Reconcile + res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: vSphereVM.Namespace, + Name: vSphereVM.Name, + }}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res).To(Equal(ctrl.Result{RequeueAfter: 5 * time.Second})) + + // Check the conditionsTracker is waiting for infrastructure ready + conditionsTracker := &infrav1.VSphereVM{} + err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker) + g.Expect(err).ToNot(HaveOccurred()) + + c := conditions.Get(conditionsTracker, VMProvisionedCondition) + g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(c.Severity).To(Equal(clusterv1.ConditionSeverityInfo)) + g.Expect(c.Reason).To(Equal(WaitingControlPlaneInitializedReason)) + }) + + t.Run("VSphereMachine provisioned gets a node (worker)", func(t *testing.T) { + g := NewWithT(t) + + vsphereCluster := &infrav1.VSphereCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + } + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "bar", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: "VSphereCluster", + Namespace: vsphereCluster.Namespace, + Name: vsphereCluster.Name, + UID: vsphereCluster.UID, + }, + }, + } + + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: ptr.To("foo"), // this unblocks node provisioning + }, + }, + } + + vSphereMachine := &infrav1.VSphereMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: machine.Name, + UID: machine.UID, + }, + }, + }, + } + + vSphereVM := &infrav1.VSphereVM{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: infrav1.GroupVersion.String(), + Kind: "VSphereMachine", + Name: vSphereMachine.Name, + UID: vSphereMachine.UID, + }, + }, + Finalizers: []string{ + VMFinalizer, // Adding this to move past the first reconcile + }, + }, + Spec: infrav1.VSphereVMSpec{ + BiosUUID: "foo", // This unblocks provisioning of node + }, + Status: infrav1.VSphereVMStatus{ + Ready: true, // This unblocks provisioning of node + }, + } + + // Controller runtime client + crclient := fake.NewClientBuilder().WithObjects(cluster, vsphereCluster, machine, vSphereMachine, vSphereVM).WithScheme(scheme).Build() + + // Start cloud manager & add a resourceGroup for the cluster + inmemoryMgr := inmemoryruntime.NewManager(cloudScheme) + err := inmemoryMgr.Start(ctx) + g.Expect(err).ToNot(HaveOccurred()) + + inmemoryMgr.AddResourceGroup(klog.KObj(cluster).String()) + inmemoryClient := inmemoryMgr.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + + // Start an http server + apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inmemoryMgr, "127.0.0.1", inmemoryserver.CustomPorts{ + // NOTE: make sure to use ports different than other tests, so we can run tests in parallel + MinPort: 
inmemoryserver.DefaultMinPort + 300,
+			MaxPort:   inmemoryserver.DefaultMinPort + 399,
+			DebugPort: inmemoryserver.DefaultDebugPort + 3,
+		})
+		g.Expect(err).ToNot(HaveOccurred())
+
+		r := VSphereVMReconciler{
+			Client:          crclient,
+			InMemoryManager: inmemoryMgr,
+			APIServerMux:    apiServerMux,
+		}
+
+		// Reconcile
+		nodeStartupDuration = 0 * time.Second
+
+		res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+			Namespace: vSphereVM.Namespace,
+			Name:      vSphereVM.Name,
+		}})
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(res).To(Equal(ctrl.Result{}))
+
+		// Check that the conditionsTracker reports the node as provisioned
+
+		conditionsTracker := &infrav1.VSphereVM{}
+		err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker)
+		g.Expect(err).ToNot(HaveOccurred())
+
+		c := conditions.Get(conditionsTracker, NodeProvisionedCondition)
+		g.Expect(c.Status).To(Equal(corev1.ConditionTrue))
+	})
+}
diff --git a/test/infrastructure/vcsim/main.go b/test/infrastructure/vcsim/main.go
new file mode 100644
index 0000000000..3bfdf6511f
--- /dev/null
+++ b/test/infrastructure/vcsim/main.go
@@ -0,0 +1,410 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package main defines main for the vcsim controller.
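+// It runs the VCenterSimulator, ControlPlaneEndpoint, EnvVar and VirtualMachine/VSphereVM controllers against the management cluster.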
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"os"
+	"reflect"
+	goruntime "runtime"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/pflag"
+	vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
+	topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	cliflag "k8s.io/component-base/cli/flag"
+	"k8s.io/component-base/logs"
+	logsv1 "k8s.io/component-base/logs/api/v1"
+	"k8s.io/klog/v2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/controllers/remote"
+	"sigs.k8s.io/cluster-api/feature"
+	"sigs.k8s.io/cluster-api/util/flags"
+	"sigs.k8s.io/cluster-api/version"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
+	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/constants"
+	inmemoryruntime "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/runtime"
+	inmemoryserver "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/tmp-to-be-deleted/server"
+	vcsimv1 "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/api/v1alpha1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/vcsim/controllers"
+)
+
+var (
+	inmemoryScheme = runtime.NewScheme()
+	scheme         = runtime.NewScheme()
+	setupLog       = ctrl.Log.WithName("setup")
+	controllerName = "cluster-api-vcsim-controller-manager"
+
+	// Common flags.
+	enableLeaderElection        bool
+	leaderElectionLeaseDuration time.Duration
+	leaderElectionRenewDeadline time.Duration
+	leaderElectionRetryPeriod   time.Duration
+	watchFilterValue            string
+	watchNamespace              string
+	profilerAddress             string
+	enableContentionProfiling   bool
+	syncPeriod                  time.Duration
+	restConfigQPS               float32
+	restConfigBurst             int
+	healthAddr                  string
+	diagnosticsOptions          = flags.DiagnosticsOptions{}
+	logOptions                  = logs.NewOptions()
+	// vcsim specific flags.
+	vmConcurrency            int
+	vCenterConcurrency       int
+	fakeAPIServerConcurrency int
+	envsubstConcurrency      int
+	// vsphere session specific flags.
+	enableKeepAlive   bool
+	keepAliveDuration time.Duration
+)
+
+func init() {
+	// scheme used for operating on the management cluster.
+	_ = corev1.AddToScheme(scheme)
+	_ = clusterv1.AddToScheme(scheme)
+	_ = infrav1.AddToScheme(scheme)
+	_ = vcsimv1.AddToScheme(scheme)
+	_ = topologyv1.AddToScheme(scheme)
+	_ = vmoprv1.AddToScheme(scheme)
+	_ = storagev1.AddToScheme(scheme)
+	_ = vmwarev1.AddToScheme(scheme)
+
+	// scheme used for operating in memory.
+	_ = corev1.AddToScheme(inmemoryScheme)
+	_ = appsv1.AddToScheme(inmemoryScheme)
+	_ = rbacv1.AddToScheme(inmemoryScheme)
+	_ = infrav1.AddToScheme(inmemoryScheme)
+}
+
+// InitFlags initializes the flags.
+func InitFlags(fs *pflag.FlagSet) {
+	logsv1.AddFlags(logOptions, fs)
+
+	fs.BoolVar(&enableLeaderElection, "leader-elect", false,
+		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
+
+	fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second,
+		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)")
+
+	fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second,
+		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)")
+
+	fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second,
+		"Duration the LeaderElector clients should wait between tries of actions (duration string)")
+
+	fs.StringVar(&watchNamespace, "namespace", "",
+		"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.")
+
+	fs.StringVar(&watchFilterValue, "watch-filter", "",
+		fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel))
+
+	fs.StringVar(&profilerAddress, "profiler-address", "",
+		"Bind address to expose the pprof profiler (e.g. localhost:6060)")
+
+	fs.BoolVar(&enableContentionProfiling, "contention-profiling", false,
+		"Enable block profiling")
+
+	fs.IntVar(&vmConcurrency, "vm-concurrency", 10,
+		"Number of vSphere VMs to process simultaneously")
+
+	fs.IntVar(&vCenterConcurrency, "vcenter-concurrency", 10,
+		"Number of vCenter servers to process simultaneously")
+
+	fs.IntVar(&fakeAPIServerConcurrency, "fake-apiserver-endpoint-concurrency", 10,
+		"Number of vcsim control plane endpoints to process simultaneously")
+
+	fs.IntVar(&envsubstConcurrency, "envsubst-concurrency", 10,
+		"Number of EnvVar objects to process simultaneously")
+
+	fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute,
+		"The minimum interval at which watched resources are reconciled (e.g. 15m)")
+
+	fs.Float32Var(&restConfigQPS, "kube-api-qps", 20,
+		"Maximum queries per second from the controller client to the Kubernetes API server. Defaults to 20")
+
+	fs.IntVar(&restConfigBurst, "kube-api-burst", 30,
+		"Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server. Defaults to 30")
+
+	fs.StringVar(&healthAddr, "health-addr", ":9440",
+		"The address the health endpoint binds to.")
+
+	fs.BoolVar(&enableKeepAlive, "enable-keep-alive", constants.DefaultEnableKeepAlive,
+		"Enable the keep alive handler in vSphere sessions. This functionality is enabled by default.")
+
+	fs.DurationVar(&keepAliveDuration, "keep-alive-duration", constants.DefaultKeepAliveDuration,
+		"Idle time interval (in minutes) between keep alive requests in the keep alive handler")
+
+	flags.AddDiagnosticsOptions(fs, &diagnosticsOptions)
+
+	feature.MutableGates.AddFlag(fs)
+}
+
+// Add RBAC for the authorized diagnostics endpoint.
+// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create
+// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create
+
+func main() {
+	InitFlags(pflag.CommandLine)
+	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	// Set log level 2 as default.
+ if err := pflag.CommandLine.Set("v", "2"); err != nil { + setupLog.Error(err, "failed to set default log level") + os.Exit(1) + } + pflag.Parse() + + if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // klog.Background will automatically use the right logger. + ctrl.SetLogger(klog.Background()) + + restConfig := ctrl.GetConfigOrDie() + restConfig.QPS = restConfigQPS + restConfig.Burst = restConfigBurst + restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName) + + diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions) + + var watchNamespaces map[string]cache.Config + if watchNamespace != "" { + watchNamespaces = map[string]cache.Config{ + watchNamespace: {}, + } + } + + if enableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + + ctrlOptions := ctrl.Options{ + Scheme: scheme, + LeaderElection: enableLeaderElection, + LeaderElectionID: "vcsim-controller-leader-election-capi", + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + HealthProbeBindAddress: healthAddr, + PprofBindAddress: profilerAddress, + Metrics: diagnosticsOpts, + Cache: cache.Options{ + DefaultNamespaces: watchNamespaces, + SyncPeriod: &syncPeriod, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + }, + }, + // WebhookServer: webhook.NewServer( + // webhook.Options{ + // Port: webhookPort, + // CertDir: webhookCertDir, + // TLSOpts: tlsOptionOverrides, + // }, + // ), + } + + mgr, err := ctrl.NewManager(restConfig, ctrlOptions) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // Setup the context that's going to be used in controllers and for the manager. + ctx := ctrl.SetupSignalHandler() + + // Check for non-supervisor VSphereCluster and start controller if found + gvr := infrav1.GroupVersion.WithResource(reflect.TypeOf(&infrav1.VSphereCluster{}).Elem().Name()) + nonSupervisorMode, err := isCRDDeployed(mgr, gvr) + if err != nil { + setupLog.Error(err, "unable to detect supervisor mode") + os.Exit(1) + } + + // Check for supervisor VSphereCluster and start controller if found + gvr = vmwarev1.GroupVersion.WithResource(reflect.TypeOf(&vmwarev1.VSphereCluster{}).Elem().Name()) + supervisorMode, err := isCRDDeployed(mgr, gvr) + if err != nil { + setupLog.Error(err, "unable to detect supervisor mode") + os.Exit(1) + } + + // Continuing startup does not make sense without having managers added. 
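+	// Exiting here lets the Pod restart and retry the detection once the CAPV CRDs are deployed.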
+ if !nonSupervisorMode && !supervisorMode { + err := errors.New("neither supervisor nor non-supervisor CRDs detected") + setupLog.Error(err, "CAPV CRDs are not deployed yet, restarting") + os.Exit(1) + } + + setupChecks(mgr, supervisorMode) + setupIndexes(ctx, mgr, supervisorMode) + setupReconcilers(ctx, mgr, supervisorMode) + setupWebhooks(mgr, supervisorMode) + + setupLog.Info("starting manager", "version", version.Get().String()) + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} + +func setupChecks(mgr ctrl.Manager, _ bool) { + if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "unable to create ready check") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "unable to create health check") + os.Exit(1) + } +} + +func setupIndexes(_ context.Context, _ ctrl.Manager, _ bool) { +} + +func setupReconcilers(ctx context.Context, mgr ctrl.Manager, supervisorMode bool) { + // Start cloud manager + inmemoryManager := inmemoryruntime.NewManager(inmemoryScheme) + if err := inmemoryManager.Start(ctx); err != nil { + setupLog.Error(err, "unable to start a cloud manager") + os.Exit(1) + } + + // Start an http server + podIP := os.Getenv("POD_IP") + apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inmemoryManager, podIP) + if err != nil { + setupLog.Error(err, "unable to create workload clusters mux") + os.Exit(1) + } + + // Setup reconcilers + if err := (&controllers.VCenterSimulatorReconciler{ + Client: mgr.GetClient(), + SupervisorMode: supervisorMode, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(vCenterConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "VCenterSimulatorReconciler") + os.Exit(1) + } + + if err := (&controllers.ControlPlaneEndpointReconciler{ + Client: mgr.GetClient(), + InMemoryManager: inmemoryManager, + APIServerMux: apiServerMux, + PodIP: podIP, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(fakeAPIServerConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ControlPlaneEndpointReconciler") + os.Exit(1) + } + + if supervisorMode { + if err := (&controllers.VirtualMachineReconciler{ + Client: mgr.GetClient(), + InMemoryManager: inmemoryManager, + APIServerMux: apiServerMux, + EnableKeepAlive: enableKeepAlive, + KeepAliveDuration: keepAliveDuration, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(vmConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "VirtualMachineReconciler") + os.Exit(1) + } + } else { + if err := (&controllers.VSphereVMReconciler{ + Client: mgr.GetClient(), + InMemoryManager: inmemoryManager, + APIServerMux: apiServerMux, + EnableKeepAlive: enableKeepAlive, + KeepAliveDuration: keepAliveDuration, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(vmConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "VSphereVMReconciler") + os.Exit(1) + } + } + + if err := (&controllers.EnvVarReconciler{ + Client: mgr.GetClient(), + SupervisorMode: supervisorMode, + PodIP: podIP, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, concurrency(envsubstConcurrency)); err != nil { + setupLog.Error(err, "unable to create controller", 
"controller", "EnvVarReconciler") + os.Exit(1) + } +} + +func setupWebhooks(_ ctrl.Manager, _ bool) { +} + +func concurrency(c int) controller.Options { + return controller.Options{MaxConcurrentReconciles: c} +} + +func isCRDDeployed(mgr ctrlmgr.Manager, gvr schema.GroupVersionResource) (bool, error) { + _, err := mgr.GetRESTMapper().KindFor(gvr) + if err != nil { + var discoveryErr *apiutil.ErrResourceDiscoveryFailed + ok := errors.As(errors.Unwrap(err), &discoveryErr) + if !ok { + return false, err + } + discoveryErrs := *discoveryErr + gvrErr, ok := discoveryErrs[gvr.GroupVersion()] + if !ok { + return false, err + } + if apierrors.IsNotFound(gvrErr) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/test/infrastructure/vcsim/scripts/vcsim.sh b/test/infrastructure/vcsim/scripts/vcsim.sh new file mode 100755 index 0000000000..fc8b787fec --- /dev/null +++ b/test/infrastructure/vcsim/scripts/vcsim.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +# Copyright 2024 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +VCENTERSIMULATOR_NAME=$1 +CLUSTER_NAME=$2 + +# Validate params +if [ -z "$VCENTERSIMULATOR_NAME" ] +then + echo "ERROR: VCenterSimulator name missing. usage: vcsim-prepare " + exit 1 +fi + +if [ -z "$CLUSTER_NAME" ] +then + echo "ERROR: Workload cluster name missing. usage: vcsim-prepare " + exit 1 +fi + +# Check VCenterSimulator exists or create it +if eval "kubectl get VCenterSimulator $VCENTERSIMULATOR_NAME &> /dev/null"; then + echo "using existing VCenterSimulator $VCENTERSIMULATOR_NAME" +else + kubectl apply -f - &> /dev/null < /dev/null"; then + echo "using existing ControlPlaneEndpoint $CLUSTER_NAME" +else + kubectl apply -f - &> /dev/null < /dev/null"; then + echo "using existing EnvVar $CLUSTER_NAME" +else + kubectl apply -f - &> /dev/null < vcsim.env + +echo "done!" 
+echo "GOVC_URL=$(kubectl get envvar "$CLUSTER_NAME" -o json | jq -r ".status.variables.GOVC_URL")" +echo +echo "source vcsim.env" diff --git a/test/infrastructure/vcsim/tilt-provider.json b/test/infrastructure/vcsim/tilt-provider.json new file mode 100644 index 0000000000..f7ec792c4c --- /dev/null +++ b/test/infrastructure/vcsim/tilt-provider.json @@ -0,0 +1,14 @@ +{ + "name": "vcsim", + "config": { + "version": "v1.10.99", + "image": "gcr.io/k8s-staging-capi-vsphere/cluster-api-vcsim-controller", + "live_reload_deps": [ + "main.go", + "api", + "controllers", + "server" + ], + "label": "VCSIM" + } +} diff --git a/tilt-provider.json b/tilt-provider.json index ace5e31565..34209bcac1 100644 --- a/tilt-provider.json +++ b/tilt-provider.json @@ -1,18 +1,39 @@ -{ - "name": "vsphere", - "config": { - "version": "v1.10.99", - "image": "gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller", - "live_reload_deps": [ - "main.go", - "go.mod", - "go.sum", - "apis", - "controllers", - "feature", - "internal", - "pkg" - ], - "label": "CAPV" +[ + { + "name": "vsphere", + "config": { + "version": "v1.10.99", + "image": "gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller", + "live_reload_deps": [ + "main.go", + "go.mod", + "go.sum", + "apis", + "controllers", + "feature", + "internal", + "pkg" + ], + "label": "CAPV" + } + }, + { + "name": "vsphere-supervisor", + "config": { + "version": "v1.10.99", + "image": "gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller", + "live_reload_deps": [ + "main.go", + "go.mod", + "go.sum", + "apis", + "controllers", + "feature", + "internal", + "pkg" + ], + "label": "CAPV", + "kustomize_folder": "/config/supervisor" + } } -} +]