From 605cd2078d5db2f7881d1438b6ec6dae3c8e4eba Mon Sep 17 00:00:00 2001
From: Christian Schlotter
Date: Thu, 1 Feb 2024 08:41:12 +0100
Subject: [PATCH] hack: add capv-janitor for automated ci cleanup

---
 hack/tools/janitor/Dockerfile                 |  56 ++++
 hack/tools/janitor/Makefile                   |  29 +++
 hack/tools/janitor/README.md                  |  32 +++
 .../janitor/config/cronjob/credentials.yaml   |  11 +
 .../tools/janitor/config/cronjob/cronjob.yaml |  46 ++++
 .../janitor/config/cronjob/kustomization.yaml |  10 +
 .../janitor/config/cronjob/namespace.yaml     |   4 +
 .../config/cronjob/service_account.yaml       |   5 +
 .../janitor/config/default/kustomization.yaml |  11 +
 .../janitor/config/rbac/kustomization.yaml    |   9 +
 hack/tools/janitor/config/rbac/role.yaml      |  20 ++
 .../janitor/config/rbac/rolebinding.yaml      |  12 +
 hack/tools/janitor/janitor.go                 | 242 ++++++++++++++++++
 hack/tools/janitor/main.go                    | 123 +++++++++
 hack/tools/janitor/vSphere.go                 | 106 ++++++++
 15 files changed, 716 insertions(+)
 create mode 100644 hack/tools/janitor/Dockerfile
 create mode 100644 hack/tools/janitor/Makefile
 create mode 100644 hack/tools/janitor/README.md
 create mode 100644 hack/tools/janitor/config/cronjob/credentials.yaml
 create mode 100644 hack/tools/janitor/config/cronjob/cronjob.yaml
 create mode 100644 hack/tools/janitor/config/cronjob/kustomization.yaml
 create mode 100644 hack/tools/janitor/config/cronjob/namespace.yaml
 create mode 100644 hack/tools/janitor/config/cronjob/service_account.yaml
 create mode 100644 hack/tools/janitor/config/default/kustomization.yaml
 create mode 100644 hack/tools/janitor/config/rbac/kustomization.yaml
 create mode 100644 hack/tools/janitor/config/rbac/role.yaml
 create mode 100644 hack/tools/janitor/config/rbac/rolebinding.yaml
 create mode 100644 hack/tools/janitor/janitor.go
 create mode 100644 hack/tools/janitor/main.go
 create mode 100644 hack/tools/janitor/vSphere.go

diff --git a/hack/tools/janitor/Dockerfile b/hack/tools/janitor/Dockerfile
new file mode 100644
index 0000000000..2554164759
--- /dev/null
+++ b/hack/tools/janitor/Dockerfile
@@ -0,0 +1,56 @@
+# syntax=docker/dockerfile:1.4
+
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the manager binary
+ARG GOLANG_VERSION=golang:1.20.12
+FROM --platform=${BUILDPLATFORM} ${GOLANG_VERSION} as builder
+WORKDIR /workspace
+
+# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy
+ARG goproxy=https://proxy.golang.org
+ENV GOPROXY=${goproxy}
+
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN --mount=type=cache,target=/go/pkg/mod \
+    go mod download
+
+# Copy the sources
+COPY ./ ./
+
+# Build
+ARG TARGETOS
+ARG TARGETARCH
+ARG ldflags
+WORKDIR /workspace
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \
+    go build -a -ldflags "${ldflags} -extldflags '-static'" \
+    -o /out/capv-janitor ./hack/tools/janitor
+
+# Copy the capv-janitor into a thin image
+ARG TARGETPLATFORM
+FROM --platform=${TARGETPLATFORM} gcr.io/distroless/static:nonroot
+WORKDIR /
+COPY --from=builder /out/capv-janitor .
+# Use uid of nonroot user (65532) because kubernetes expects numeric user when applying PSPs
+USER 65532
+ENTRYPOINT ["/capv-janitor"]
diff --git a/hack/tools/janitor/Makefile b/hack/tools/janitor/Makefile
new file mode 100644
index 0000000000..789f2a87cd
--- /dev/null
+++ b/hack/tools/janitor/Makefile
@@ -0,0 +1,29 @@
+# Copyright 2024 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+all: build
+
+VERSION ?= $(shell git describe --always --dirty)
+IMAGE_NAME ?= gcr.io/k8s-staging-capi-vsphere/extra/capv-janitor
+IMAGE_TAG ?= $(IMAGE_NAME):$(VERSION)
+
+build:
+	docker build -t $(IMAGE_TAG) -f Dockerfile ../../..
+	docker tag $(IMAGE_TAG) $(IMAGE_NAME):latest
+.PHONY: build
+
+push:
+	docker push $(IMAGE_TAG)
+	docker push $(IMAGE_NAME):latest
+.PHONY: push
diff --git a/hack/tools/janitor/README.md b/hack/tools/janitor/README.md
new file mode 100644
index 0000000000..4a1a5f057a
--- /dev/null
+++ b/hack/tools/janitor/README.md
@@ -0,0 +1,32 @@
+# janitor
+
+The janitor is a CI tool to clean up objects left over from failed or killed prow jobs.
+It can be run regularly as a CronJob.
+
+It tries to delete:
+
+* vSphere: virtual machines in the configured folders which are older than the configured `--min-age`.
+* vSphere: cluster modules which do not refer to any virtual machine.
+* IPAM: IPAddressClaims which are older than the configured `--min-age`.
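+
+For illustration, a local dry run could look like this (assuming the `VSPHERE_*` environment variables from the deployment section below and a kubeconfig for the IPAM cluster are set; the folder path is only an example):
+
+```sh
+go run ./hack/tools/janitor \
+  --dry-run=true \
+  --min-age=12h \
+  --ipam-namespace=default \
+  --folder=/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/cluster-api-provider-vsphere
+```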
+
+## Deployment
+
+1. (Optional) Build and push the image:
+
+   ```sh
+   cd hack/tools/janitor
+   make build push
+   ```
+
+2. Deploy using kustomize and envsubst:
+
+   ```sh
+   export VSPHERE_SERVER=""
+   export VSPHERE_USERNAME=""
+   export VSPHERE_PASSWORD=""
+   export VSPHERE_TLS_THUMBPRINT=""
+
+   cd hack/tools/janitor
+   kustomize build config/default | envsubst | kubectl apply -f -
+   ```
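+
+3. (Optional) Verify the deployment. Illustrative only: with the default kustomization, the `capv-` namePrefix and the `capv-janitor` namespace apply, so the following should list the CronJob:
+
+   ```sh
+   kubectl -n capv-janitor get cronjob capv-janitor
+   ```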
diff --git a/hack/tools/janitor/config/cronjob/credentials.yaml b/hack/tools/janitor/config/cronjob/credentials.yaml
new file mode 100644
index 0000000000..d05248f9fb
--- /dev/null
+++ b/hack/tools/janitor/config/cronjob/credentials.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: janitor
+  namespace: default
+type: Opaque
+stringData:
+  VSPHERE_USERNAME: "${VSPHERE_USERNAME}"
+  VSPHERE_PASSWORD: "${VSPHERE_PASSWORD}"
+  VSPHERE_SERVER: "${VSPHERE_SERVER}"
+  VSPHERE_TLS_THUMBPRINT: "${VSPHERE_TLS_THUMBPRINT}"
diff --git a/hack/tools/janitor/config/cronjob/cronjob.yaml b/hack/tools/janitor/config/cronjob/cronjob.yaml
new file mode 100644
index 0000000000..53b2d78fd7
--- /dev/null
+++ b/hack/tools/janitor/config/cronjob/cronjob.yaml
@@ -0,0 +1,46 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: janitor
+  namespace: default
+spec:
+  # Run twice a day.
+  schedule: "0 */12 * * *"
+  concurrencyPolicy: Replace
+  failedJobsHistoryLimit: 3
+  successfulJobsHistoryLimit: 3
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+          - args:
+            - --dry-run=false
+            - --min-age=12h
+            # In CAPV's CI, IPAddressClaims are created in the default namespace.
+            - --ipam-namespace=default
+            # The vSphere folders to clean up in CI.
+            - --folder=/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/cluster-api-provider-vsphere
+            - --folder=/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/cloud-provider-vsphere
+            - --folder=/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/image-builder
+            image: gcr.io/k8s-staging-capi-vsphere/extra/capv-janitor:latest
+            imagePullPolicy: IfNotPresent
+            name: capv-janitor
+            envFrom:
+            - secretRef:
+                name: capv-janitor
+                optional: false
+            securityContext:
+              allowPrivilegeEscalation: false
+              capabilities:
+                drop:
+                - ALL
+              runAsUser: 65532
+              runAsGroup: 65532
+          restartPolicy: Never
+          securityContext:
+            runAsNonRoot: true
+            seccompProfile:
+              type: RuntimeDefault
+          serviceAccountName: janitor
+          terminationGracePeriodSeconds: 10
diff --git a/hack/tools/janitor/config/cronjob/kustomization.yaml b/hack/tools/janitor/config/cronjob/kustomization.yaml
new file mode 100644
index 0000000000..297f5eb0a2
--- /dev/null
+++ b/hack/tools/janitor/config/cronjob/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namespace: capv-janitor
+
+resources:
+- credentials.yaml
+- cronjob.yaml
+- namespace.yaml
+- service_account.yaml
diff --git a/hack/tools/janitor/config/cronjob/namespace.yaml b/hack/tools/janitor/config/cronjob/namespace.yaml
new file mode 100644
index 0000000000..0917cc70f3
--- /dev/null
+++ b/hack/tools/janitor/config/cronjob/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: janitor
\ No newline at end of file
diff --git a/hack/tools/janitor/config/cronjob/service_account.yaml b/hack/tools/janitor/config/cronjob/service_account.yaml
new file mode 100644
index 0000000000..42102104f5
--- /dev/null
+++ b/hack/tools/janitor/config/cronjob/service_account.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: janitor
+  namespace: janitor
\ No newline at end of file
diff --git a/hack/tools/janitor/config/default/kustomization.yaml b/hack/tools/janitor/config/default/kustomization.yaml
new file mode 100644
index 0000000000..60019bef38
--- /dev/null
+++ b/hack/tools/janitor/config/default/kustomization.yaml
@@ -0,0 +1,11 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: capv-
+
+commonLabels:
+  component: capv-janitor
+
+bases:
+- ../cronjob
+- ../rbac
\ No newline at end of file
diff --git a/hack/tools/janitor/config/rbac/kustomization.yaml b/hack/tools/janitor/config/rbac/kustomization.yaml
new file mode 100644
index 0000000000..db31f5ce05
--- /dev/null
+++ b/hack/tools/janitor/config/rbac/kustomization.yaml
@@ -0,0 +1,9 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+# In CAPV's CI, IPAddressClaims are created in the default namespace.
+namespace: default
+
+resources:
+- role.yaml
+- rolebinding.yaml
diff --git a/hack/tools/janitor/config/rbac/role.yaml b/hack/tools/janitor/config/rbac/role.yaml
new file mode 100644
index 0000000000..154d7d1d16
--- /dev/null
+++ b/hack/tools/janitor/config/rbac/role.yaml
@@ -0,0 +1,20 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: capv-janitor-role
+rules:
+- apiGroups:
+  - ipam.cluster.x-k8s.io
+  resources:
+  - ipaddressclaims
+  verbs:
+  - delete
+- apiGroups:
+  - ipam.cluster.x-k8s.io
+  resources:
+  - ipaddressclaims
+  - ipaddresses
+  verbs:
+  - get
+  - list
+  - watch
diff --git a/hack/tools/janitor/config/rbac/rolebinding.yaml b/hack/tools/janitor/config/rbac/rolebinding.yaml
new file mode 100644
index 0000000000..c3fa331fdf
--- /dev/null
+++ b/hack/tools/janitor/config/rbac/rolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: capv-janitor-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: capv-janitor-role
+subjects:
+- kind: ServiceAccount
+  name: capv-janitor
+  namespace: capv-janitor
diff --git a/hack/tools/janitor/janitor.go b/hack/tools/janitor/janitor.go
new file mode 100644
index 0000000000..0621b8ebe5
--- /dev/null
+++ b/hack/tools/janitor/janitor.go
@@ -0,0 +1,242 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/vmware/govmomi/find"
+	"github.com/vmware/govmomi/object"
+	govmomicluster "github.com/vmware/govmomi/vapi/cluster"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
+	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// newJanitor creates a new janitor. Objects created before time.Now()-delta
+// are considered for deletion.
+func newJanitor(vSphereClients *vSphereClients, ipamClient client.Client, delta time.Duration, ipamNamespace string, dryRun bool) *janitor {
+	return &janitor{
+		dryRun:          dryRun,
+		ipamClient:      ipamClient,
+		ipamNamespace:   ipamNamespace,
+		maxCreationDate: time.Now().Add(-delta),
+		vSphereClients:  vSphereClients,
+	}
+}
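+
+// For illustration only (not used by the tool): a janitor constructed for a
+// 12h minimum age in dry-run mode, cleaning a single example folder, would
+// look like:
+//
+//	j := newJanitor(vSphereClients, ipamClient, 12*time.Hour, "default", true)
+//	err := j.vSphereVMs(ctx, "/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/cluster-api-provider-vsphere")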
+
+type janitor struct {
+	dryRun          bool
+	ipamClient      client.Client
+	ipamNamespace   string
+	maxCreationDate time.Time
+	vSphereClients  *vSphereClients
+}
+
+type virtualMachine struct {
+	managedObject mo.VirtualMachine
+	object        *object.VirtualMachine
+}
+
+// vSphereVMs deletes all VMs in the given folder in vSphere if their creation
+// timestamp is before the janitor's configured maxCreationDate.
+func (s *janitor) vSphereVMs(ctx context.Context, folder string) error {
+	log := ctrl.LoggerFrom(ctx).WithName("vSphereVMs").WithValues("folder", folder)
+	ctx = ctrl.LoggerInto(ctx, log)
+
+	if folder == "" {
+		return fmt.Errorf("cannot use empty string as folder")
+	}
+
+	log.Info("Deleting vSphere VMs in folder")
+
+	// List all virtual machines inside the folder.
+	finder := find.NewFinder(s.vSphereClients.Vim, false)
+	managedObjects, err := finder.ManagedObjectListChildren(ctx, folder+"/...", "VirtualMachine")
+	if err != nil {
+		return err
+	}
+
+	if len(managedObjects) == 0 {
+		return nil
+	}
+
+	// Retrieve information for all found virtual machines.
+	managedObjectReferences := []types.ManagedObjectReference{}
+	for _, obj := range managedObjects {
+		managedObjectReferences = append(managedObjectReferences, obj.Object.Reference())
+	}
+	var managedObjectVMs []mo.VirtualMachine
+	if err := s.vSphereClients.Govmomi.Retrieve(ctx, managedObjectReferences, []string{"config", "summary.runtime.powerState"}, &managedObjectVMs); err != nil {
+		return err
+	}
+
+	vmsToDeleteAndPoweroff := []*virtualMachine{}
+	vmsToDelete := []*virtualMachine{}
+
+	// Filter out VMs that don't need cleanup, based on s.maxCreationDate.
+	for _, managedObjectVM := range managedObjectVMs {
+		if managedObjectVM.Config.CreateDate.After(s.maxCreationDate) {
+			// Ignore VMs created after maxCreationDate.
+			continue
+		}
+
+		vm := &virtualMachine{
+			managedObject: managedObjectVM,
+			object:        object.NewVirtualMachine(s.vSphereClients.Vim, managedObjectVM.Reference()),
+		}
+
+		if vm.managedObject.Summary.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
+			vmsToDeleteAndPoweroff = append(vmsToDeleteAndPoweroff, vm)
+			continue
+		}
+		vmsToDelete = append(vmsToDelete, vm)
+	}
+
+	// Power off VMs which are still running. Triggering PowerOff for a VM results in a task in vSphere.
+	poweroffTasks := []*object.Task{}
+	for _, vm := range vmsToDeleteAndPoweroff {
+		log.Info("Powering off VM in vSphere", "vm", vm.managedObject.Config.Name)
+		if s.dryRun {
+			// Skipping actual PowerOff on dryRun.
+			continue
+		}
+		task, err := vm.object.PowerOff(ctx)
+		if err != nil {
+			return err
+		}
+		poweroffTasks = append(poweroffTasks, task)
+	}
+	// Wait for all PowerOff tasks to be finished. We intentionally only log
+	// errors instead of failing, because the VM may already be powered off.
+	// xref govc: https://github.com/vmware/govmomi/blob/512c168/govc/vm/destroy.go#L94-L96
+	if err := waitForTasksFinished(ctx, poweroffTasks, true); err != nil {
+		log.Info("Ignoring error for PowerOff task", "err", err)
+	}
+
+	destroyTasks := []*object.Task{}
+	for _, vm := range append(vmsToDeleteAndPoweroff, vmsToDelete...) {
+		log.Info("Destroying VM in vSphere", "vm", vm.managedObject.Config.Name)
+		if s.dryRun {
+			// Skipping actual destroy on dryRun.
+			continue
+		}
+		task, err := vm.object.Destroy(ctx)
+		if err != nil {
+			return err
+		}
+		destroyTasks = append(destroyTasks, task)
+	}
+	// Wait for all destroy tasks to succeed.
+	if err := waitForTasksFinished(ctx, destroyTasks, false); err != nil {
+		return errors.Wrap(err, "failed to wait for vm destroy task to finish")
+	}
+
+	return nil
+}
+
+// waitForTasksFinished waits for all given tasks to finish. If ignoreErrors
+// is true, task errors are skipped instead of returned.
+func waitForTasksFinished(ctx context.Context, tasks []*object.Task, ignoreErrors bool) error {
+	for _, t := range tasks {
+		if err := t.Wait(ctx); !ignoreErrors && err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ipAddressClaims deletes all IPAddressClaims in the configured namespace if
+// their creation timestamp is before the janitor's configured maxCreationDate.
+func (s *janitor) ipAddressClaims(ctx context.Context) error {
+	log := ctrl.LoggerFrom(ctx).WithName("IPAddressClaims")
+	ctx = ctrl.LoggerInto(ctx, log)
+	log.Info("Deleting IPAddressClaims")
+
+	// List all existing IPAddressClaims.
+	ipAddressClaims := &ipamv1.IPAddressClaimList{}
+	if err := s.ipamClient.List(ctx, ipAddressClaims,
+		client.InNamespace(s.ipamNamespace),
+	); err != nil {
+		return err
+	}
+
+	errList := []error{}
+
+	for _, ipAddressClaim := range ipAddressClaims.Items {
+		ipAddressClaim := ipAddressClaim
+		// Skip IPAddressClaims which got created after maxCreationDate.
+		if ipAddressClaim.CreationTimestamp.After(s.maxCreationDate) {
+			continue
+		}
+
+		log.Info("Deleting IPAddressClaim", "IPAddressClaim", klog.KObj(&ipAddressClaim))
+
+		if s.dryRun {
+			// Skipping actual deletion on dryRun.
+			continue
+		}
+
+		if err := s.ipamClient.Delete(ctx, &ipAddressClaim); err != nil {
+			errList = append(errList, err)
+		}
+	}
+
+	return kerrors.NewAggregate(errList)
+}
+
+// vSphereClusterModules deletes all vSphere cluster modules which do not refer
+// to any virtual machine.
+func (s *janitor) vSphereClusterModules(ctx context.Context) error {
+	log := ctrl.LoggerFrom(ctx).WithName("vSphere cluster modules")
+	ctx = ctrl.LoggerInto(ctx, log)
+	log.Info("Deleting vSphere cluster modules")
+
+	manager := govmomicluster.NewManager(s.vSphereClients.Rest)
+
+	// List all existing cluster modules.
+	clusterModules, err := manager.ListModules(ctx)
+	if err != nil {
+		return err
+	}
+
+	errList := []error{}
+	// Check all modules for members and delete the modules which are empty.
+	for _, clusterModule := range clusterModules {
+		members, err := manager.ListModuleMembers(ctx, clusterModule.Module)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+
+		// Do not attempt deletion if the cluster module still refers to virtual machines.
+		if len(members) > 0 {
+			continue
+		}
+
+		log.Info("Deleting empty vSphere cluster module", "clusterModule", clusterModule.Module)
+
+		if s.dryRun {
+			// Skipping actual deletion on dryRun.
+			continue
+		}
+
+		if err := manager.DeleteModule(ctx, clusterModule.Module); err != nil {
+			errList = append(errList, err)
+		}
+	}
+
+	return kerrors.NewAggregate(errList)
+}
diff --git a/hack/tools/janitor/main.go b/hack/tools/janitor/main.go
new file mode 100644
index 0000000000..c5e4d62644
--- /dev/null
+++ b/hack/tools/janitor/main.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"context"
+	"flag"
+	"os"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/pflag"
+	"k8s.io/apimachinery/pkg/runtime"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
+	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var ipamScheme *runtime.Scheme
+
+func init() {
+	ipamScheme = runtime.NewScheme()
+	_ = ipamv1.AddToScheme(ipamScheme)
+}
+
+var (
+	dryRun         bool
+	ipamNamespace  string
+	minAge         time.Duration
+	vsphereFolders []string
+)
+
+func initFlags(fs *pflag.FlagSet) {
+	fs.StringArrayVar(&vsphereFolders, "folder", []string{}, "Paths to vCenter folders in which to clean up virtual machines.")
+	fs.StringVar(&ipamNamespace, "ipam-namespace", "", "Namespace for IPAddressClaim cleanup.")
+	fs.DurationVar(&minAge, "min-age", time.Hour*12, "Minimum age an object must have before it gets deleted.")
+	fs.BoolVar(&dryRun, "dry-run", false, "If true, only print the actions that would be taken without deleting anything.")
+}
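+
+// Note (illustrative): with the default --min-age of 12h, only objects whose
+// creation timestamp is at least 12 hours in the past are deleted; newer
+// objects are kept to avoid interfering with jobs that may still be running.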
+
+func main() {
+	initFlags(pflag.CommandLine)
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	pflag.Parse()
+
+	log := klog.Background()
+	ctx := ctrl.LoggerInto(context.Background(), log)
+
+	if err := run(ctx); err != nil {
+		log.Error(err, "Failed running capv-janitor")
+		os.Exit(1)
+	}
+
+	log.Info("Finished cleanup.")
+}
+
+func run(ctx context.Context) error {
+	log := ctrl.LoggerFrom(ctx)
+
+	log.Info("Configured settings", "dryRun", dryRun)
+	log.Info("Configured settings", "folders", vsphereFolders)
+	log.Info("Configured settings", "ipamNamespace", ipamNamespace)
+	log.Info("Configured settings", "minAge", minAge)
+
+	// Create clients for vSphere.
+	vSphereClients, err := newVSphereClients(ctx, getVSphereClientInput{
+		Username:   os.Getenv("VSPHERE_USERNAME"),
+		Password:   os.Getenv("VSPHERE_PASSWORD"),
+		Server:     os.Getenv("VSPHERE_SERVER"),
+		Thumbprint: os.Getenv("VSPHERE_TLS_THUMBPRINT"),
+		UserAgent:  "capv-janitor",
+	})
+	if err != nil {
+		return errors.Wrap(err, "creating vSphere clients")
+	}
+	defer vSphereClients.logout(ctx)
+
+	// Create a controller-runtime client for IPAM.
+	restConfig := ctrl.GetConfigOrDie()
+	ipamClient, err := client.New(restConfig, client.Options{Scheme: ipamScheme})
+	if err != nil {
+		return errors.Wrap(err, "creating IPAM client")
+	}
+
+	janitor := newJanitor(vSphereClients, ipamClient, minAge, ipamNamespace, dryRun)
+
+	// First, clean up old VMs to free up IPAddressClaims and cluster modules which are still in use.
+	errList := []error{}
+	for _, folder := range vsphereFolders {
+		if err := janitor.vSphereVMs(ctx, folder); err != nil {
+			errList = append(errList, errors.Wrapf(err, "cleaning up vSphereVMs for folder %q", folder))
+		}
+	}
+	if err := kerrors.NewAggregate(errList); err != nil {
+		return errors.Wrap(err, "cleaning up vSphereVMs")
+	}
+
+	// Second, clean up IPAddressClaims.
+	if err := janitor.ipAddressClaims(ctx); err != nil {
+		return errors.Wrap(err, "cleaning up IPAddressClaims")
+	}
+
+	// Third, clean up vSphere cluster modules.
+	if err := janitor.vSphereClusterModules(ctx); err != nil {
+		return errors.Wrap(err, "cleaning up vSphere cluster modules")
+	}
+
+	return nil
+}
diff --git a/hack/tools/janitor/vSphere.go b/hack/tools/janitor/vSphere.go
new file mode 100644
index 0000000000..ff6d9d762f
--- /dev/null
+++ b/hack/tools/janitor/vSphere.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"context"
+	"net/url"
+	"time"
+
+	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/session"
+	"github.com/vmware/govmomi/session/keepalive"
+	"github.com/vmware/govmomi/vapi/rest"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/soap"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+// getVSphereClientInput contains the credentials and connection information
+// for creating vSphere clients.
+type getVSphereClientInput struct {
+	Password   string
+	Server     string
+	Thumbprint string
+	UserAgent  string
+	Username   string
+}
+
+// vSphereClients is a collection of different clients for vSphere.
+type vSphereClients struct {
+	Vim     *vim25.Client
+	Govmomi *govmomi.Client
+	Rest    *rest.Client
+}
+
+// logout logs out all clients. Errors are only logged, not returned.
+func (v *vSphereClients) logout(ctx context.Context) {
+	log := ctrl.LoggerFrom(ctx)
+	if err := v.Govmomi.Logout(ctx); err != nil {
+		log.Error(err, "logging out govmomi client")
+	}
+
+	if err := v.Rest.Logout(ctx); err != nil {
+		log.Error(err, "logging out rest client")
+	}
+}
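+
+// Illustrative usage, matching how main.go creates the clients:
+//
+//	clients, err := newVSphereClients(ctx, getVSphereClientInput{
+//		Username:   os.Getenv("VSPHERE_USERNAME"),
+//		Password:   os.Getenv("VSPHERE_PASSWORD"),
+//		Server:     os.Getenv("VSPHERE_SERVER"),
+//		Thumbprint: os.Getenv("VSPHERE_TLS_THUMBPRINT"),
+//		UserAgent:  "capv-janitor",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer clients.logout(ctx)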
+
+// newVSphereClients creates a vSphereClients object from the given input.
+func newVSphereClients(ctx context.Context, input getVSphereClientInput) (*vSphereClients, error) {
+	urlCredentials := url.UserPassword(input.Username, input.Password)
+
+	serverURL, err := soap.ParseURL(input.Server)
+	if err != nil {
+		return nil, err
+	}
+	serverURL.User = urlCredentials
+
+	// Create the SOAP client: skip TLS verification if no thumbprint is
+	// configured, otherwise pin the given thumbprint.
+	var soapClient *soap.Client
+	if input.Thumbprint == "" {
+		soapClient = soap.NewClient(serverURL, true)
+	} else {
+		soapClient = soap.NewClient(serverURL, false)
+		soapClient.SetThumbprint(serverURL.Host, input.Thumbprint)
+	}
+	soapClient.UserAgent = input.UserAgent
+
+	vimClient, err := vim25.NewClient(ctx, soapClient)
+	if err != nil {
+		return nil, err
+	}
+
+	govmomiClient := &govmomi.Client{
+		Client:         vimClient,
+		SessionManager: session.NewManager(vimClient),
+	}
+	// Keep the session from timing out until the janitor finishes.
+	govmomiClient.RoundTripper = keepalive.NewHandlerSOAP(govmomiClient.RoundTripper, 1*time.Minute, nil)
+
+	// Log in to the session, which also starts the keepalive goroutine.
+	if err := govmomiClient.Login(ctx, urlCredentials); err != nil {
+		return nil, err
+	}
+
+	restClient := rest.NewClient(vimClient)
+	restClient.Transport = keepalive.NewHandlerREST(restClient, 5*time.Minute, nil)
+	if err := restClient.Login(ctx, urlCredentials); err != nil {
+		return nil, err
+	}
+
+	return &vSphereClients{
+		Vim:     vimClient,
+		Govmomi: govmomiClient,
+		Rest:    restClient,
+	}, nil
+}