From 8cedf3bb6188f909def87d4840f75b03417c7f5a Mon Sep 17 00:00:00 2001 From: Endre Karlson Date: Sun, 16 Jul 2023 13:06:16 +0200 Subject: [PATCH] Add and fix golangci issues --- .github/workflows/ci.yaml | 125 ++++++++++++++ .github/workflows/pr.yaml | 30 ++++ .golangci.yml | 114 +++++++++++++ Makefile | 152 +++++++++++++----- bootstrap/api/v1beta1/condition_consts.go | 4 +- bootstrap/api/v1beta1/kthreesconfig_types.go | 4 +- ...strap.cluster.x-k8s.io_kthreesconfigs.yaml | 8 +- ...uster.x-k8s.io_kthreesconfigtemplates.yaml | 8 +- ...cluster.x-k8s.io_kthreescontrolplanes.yaml | 9 +- bootstrap/config/manager/kustomization.yaml | 4 +- bootstrap/config/rbac/role.yaml | 75 +++++++++ .../controllers/kthreesconfig_controller.go | 68 ++++---- controlplane/api/v1beta1/condition_consts.go | 2 +- ...strap.cluster.x-k8s.io_kthreesconfigs.yaml | 8 +- ...uster.x-k8s.io_kthreesconfigtemplates.yaml | 8 +- ...cluster.x-k8s.io_kthreescontrolplanes.yaml | 9 +- .../config/manager/kustomization.yaml | 4 +- controlplane/controllers/const.go | 2 +- .../kthreescontrolplane_controller.go | 119 +++++++------- controlplane/controllers/scale.go | 51 +++--- controlplane/controllers/suite_test.go | 2 - pkg/cloudinit/cloudinit.go | 25 +-- pkg/cloudinit/controlplane_init_test.go | 1 - pkg/k3s/config.go | 22 ++- pkg/k3s/control_plane.go | 27 ++-- pkg/k3s/management_cluster.go | 19 +-- pkg/k3s/workload_cluster.go | 8 +- pkg/k3s/workload_cluster_coredns.go | 81 +++++----- pkg/kubeconfig/kubeconfig.go | 29 ++-- pkg/locking/control_plane_init_mutex.go | 11 +- pkg/machinefilters/machine_filters.go | 23 +-- pkg/secret/certificates.go | 9 +- pkg/token/token.go | 2 +- 33 files changed, 707 insertions(+), 356 deletions(-) create mode 100644 .github/workflows/ci.yaml create mode 100644 .github/workflows/pr.yaml create mode 100644 .golangci.yml diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000..8e7b29b1 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,125 @@ +--- +name: CI Workflow + +on: + push: + branches: + - main + schedule: + - cron: "5 0 * * *" # TODO: Run every 4 hours to soak test, should be less frequent before merge (weekly/daily/???) 
+ workflow_dispatch: + +env: + BOOTSTRAP_IMAGE_NAME: cluster-api-provider-k3s-bootstrap + CONTROLPLANE_IMAGE_NAME: cluster-api-provider-k3s-controlplane + GHCR_REGISTRY: ghcr.io/${{ github.repository_owner }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + +jobs: + validate: + name: "CI" + runs-on: ubuntu-latest + + steps: + - name: checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v4 + with: + go-version-file: "./go.mod" + + - name: GoLangCI Lint + run: make lint + + - name: Tests - bootstrap + run: make test-bootstrap + + - name: Tests - controlplane + run: make test-controlplane + + build-image: + name: Build and Push Image + runs-on: ubuntu-latest + + permissions: + packages: write # needed to push docker image to ghcr.io + pull-requests: write # needed to create and update comments in PRs + + steps: + - name: Checkout git repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Login to ghcr.io registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build docker - bootstrap + run: make docker-bootstrap + + - name: Build docker - controlplane + run: make docker-controlplane + + # If PR, put image tags in the PR comments + # from https://github.com/marketplace/actions/create-or-update-comment + - name: Find comment for image tags + uses: peter-evans/find-comment@v2 + if: github.event_name == 'pull_request' + id: fc + with: + issue-number: ${{ github.event.pull_request.number }} + comment-author: "github-actions[bot]" + body-includes: Docker image tag(s) pushed + + # If PR, put image tags in the PR comments + - name: Create or update comment for image tags + uses: peter-evans/create-or-update-comment@v3 + if: github.event_name == 'pull_request' + with: + comment-id: ${{ steps.fc.outputs.comment-id }} + issue-number: ${{ github.event.pull_request.number }} + body: | + Docker image tag(s) pushed: + ```text + ${{ steps.docker_meta.outputs.tags }} + ``` + + Labels added to images: + ```text + ${{ steps.docker_meta.outputs.labels }} + ``` + edit-mode: replace + + outputs: + image-tag: "${{ steps.docker_meta.outputs.version }}" + + release-manifests: + name: Make Release Manifests + runs-on: ubuntu-latest + needs: [build-image] + env: + TAG: ${{ needs.build-image.outputs.image-tag }} + + steps: + - name: checkout + uses: actions/checkout@v3 + + - uses: actions/setup-go@v4 + with: + go-version-file: "./go.mod" + + - name: Make Release + run: make release + + - name: Upload artifact + uses: actions/upload-artifact@v3 + with: + name: release-manifests + path: out/release diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml new file mode 100644 index 00000000..3dda43b0 --- /dev/null +++ b/.github/workflows/pr.yaml @@ -0,0 +1,30 @@ +name: Pull Request Validation +on: + pull_request: + types: [opened, synchronize, reopened] + workflow_dispatch: + +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + +jobs: + validate: + name: "Validate ${{ matrix.target }}" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + target: ["lint", "test-bootstrap", "test-controlplane"] + + steps: + - name: checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v4 + with: + go-version-file: "./go.mod" + + - name: ${{ matrix.target }} + run: make ${{ matrix.target }} diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..47c3a5da --- /dev/null +++ 
b/.golangci.yml @@ -0,0 +1,114 @@ +linters: + enable-all: true + disable: + - cyclop + - deadcode + - errname + - exhaustive + - exhaustruct + - exhaustivestruct + - forbidigo + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - godot + - godox + - golint + - gofumpt + - gomnd + - ifshort + - interfacer + - lll + - maligned + - nestif + - nilnil + - nlreturn + - nosnakecase + - paralleltest + - scopelint + - structcheck + - tagliatelle + - testpackage + - thelper + - tparallel + - varcheck + - varnamelen + - wrapcheck + - wsl + +linters-settings: + depguard: + rules: + main: + allow: + - $gostd + - github.com/go-logr/logr + - github.com/coredns/corefile-migration/migration + + - k8s.io/apimachinery/pkg + - k8s.io/api + - k8s.io/apiserver + - k8s.io/client-go + - k8s.io/klog/v2/klogr + - k8s.io/utils/pointer + + - github.com/onsi/ginkgo + - github.com/onsi/gomega + + - sigs.k8s.io/yaml + - sigs.k8s.io/controller-runtime + - sigs.k8s.io/cluster-api + + - github.com/cluster-api-provider-k3s/cluster-api-k3s + importas: + # Do not allow unaliased imports of aliased packages. + # Default: false + no-unaliased: true + alias: + # Kubernetes + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + # Controller Runtime + - pkg: sigs.k8s.io/controller-runtime + alias: ctrl + tagliatelle: + case: + rules: + # Any struct tag type can be used. + # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header` + json: goCamel + gomoddirectives: + # List of allowed `replace` directives. + # Default: [] + replace-allow-list: + - sigs.k8s.io/cluster-api + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: github.com/cluster-api-provider-k3s/cluster-api-k3s + nolintlint: + # Enable to require nolint directives to mention the specific linter being suppressed. + # Default: false + require-specific: true + revive: + rules: + - name: unused-parameter + disabled: true + +run: + go: "1.20" + timeout: 10m + skip-files: + - "zz_generated.*\\.go$" + - ".*conversion.*\\.go$" + allow-parallel-runners: true diff --git a/Makefile b/Makefile index 69589847..b31793f2 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,25 @@ +# Copyright 2020 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +# If you update this file, please follow +# https://www.thapaliya.com/en/writings/well-documented-makefiles/ + +# Ensure Make is run with bash shell as some syntax below is bash-specific +SHELL:=/usr/bin/env bash + +.DEFAULT_GOAL:=help + GO_VERSION ?= 1.20.0 GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION) @@ -18,7 +40,6 @@ GO_INSTALL := ./hack/go_install.sh BIN_DIR := bin TOOLS_BIN_DIR := $(abspath $(BIN_DIR)) - # Image URL to use all building/pushing image targets BOOTSTRAP_IMG ?= ghcr.io/cluster-api-provider-k3s/cluster-api-k3s/bootstrap-controller:v0.2.0 @@ -36,38 +57,45 @@ else GOBIN=$(shell go env GOBIN) endif -CONTROLLER_GEN_BIN = controller-gen -CONTROLLER_GEN_PKG = "sigs.k8s.io/controller-tools/cmd/controller-gen" -CONTROLLER_GEN_VER = "v0.12.0" -CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)) +# Sync to controller-tools version in https://github.com/kubernetes-sigs/cluster-api/blob/v{VERSION}/hack/tools/go.mod +CONTROLLER_GEN_VER := v0.10.0 +CONTROLLER_GEN_BIN := controller-gen +CONTROLLER_GEN := $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER) + +# Sync to github.com/drone/envsubst/v2 in https://github.com/kubernetes-sigs/cluster-api/blob/v{VERSION}/go.mod +ENVSUBST_VER := v2.0.0-20210730161058-179042472c46 +ENVSUBST_BIN := envsubst +ENVSUBST := $(TOOLS_BIN_DIR)/$(ENVSUBST_BIN) + +ENVTEST_VER := latest +ENVTEST_BIN := setup-envtest +ENVTEST := $(TOOLS_BIN_DIR)/$(ENVTEST_BIN) -.PHONY: controller-gen -controller-gen: ## Download controller-gen locally if necessary. - GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER) -KUSTOMIZE = $(shell pwd)/bin/kustomize -.PHONY: kustomize -kustomize: ## Download kustomize locally if necessary. - $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) +# Bump as necessary/desired to latest that supports our version of go at https://github.com/golangci/golangci-lint/releases +GOLANGCI_LINT_VER := v1.52.1 +GOLANGCI_LINT_BIN := golangci-lint +GOLANGCI_LINT := $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER) -ENVTEST = $(shell pwd)/bin/setup-envtest -.PHONY: envtest -envtest: ## Download envtest-setup locally if necessary. - $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) +# Keep at 4.0.4 until we figure out how to get later versions to not mangle the calico yamls +# HACK bump latest version once https://github.com/kubernetes-sigs/kustomize/issues/947 is fixed +KUSTOMIZE_VER := v4.0.4 +KUSTOMIZE_BIN := kustomize +KUSTOMIZE := $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER) all-bootstrap: manager-bootstrap # Run tests -test-bootstrap: generate-bootstrap fmt vet manifests-bootstrap +test-bootstrap: $(ENVTEST) generate-bootstrap lint manifests-bootstrap go test ./...
-coverprofile cover.out # Build manager binary -manager-bootstrap: generate-bootstrap fmt vet +manager-bootstrap: generate-bootstrap lint CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o bin/manager bootstrap/main.go # Run against the configured Kubernetes cluster in ~/.kube/config -run-bootstrap: generate-bootstrap fmt vet manifests-bootstrap +run-bootstrap: generate-bootstrap lint manifests-bootstrap go run ./bootstrap/main.go # Install CRDs into a cluster @@ -84,47 +116,38 @@ deploy-bootstrap: manifests-bootstrap $(KUSTOMIZE) build bootstrap/config/default | kubectl apply -f - # Generate manifests e.g. CRD, RBAC etc. -manifests-bootstrap: controller-gen +manifests-bootstrap: $(KUSTOMIZE) $(CONTROLLER_GEN) $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=bootstrap/config/crd/bases output:rbac:dir=bootstrap/config/rbac -release-bootstrap: manifests-bootstrap +release-bootstrap: manifests-bootstrap ## Release bootstrap mkdir -p out cd bootstrap/config/manager && $(KUSTOMIZE) edit set image controller=${BOOTSTRAP_IMG} $(KUSTOMIZE) build bootstrap/config/default > out/bootstrap-components.yaml -# Run go fmt against code -fmt: - go fmt ./... - -# Run go vet against code -vet: - go vet ./... - # Generate code -generate-bootstrap: controller-gen +generate-bootstrap: $(CONTROLLER_GEN) $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." # Build the docker image -docker-build-bootstrap: manager-bootstrap +docker-build-bootstrap: manager-bootstrap ## Build bootstrap DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./bootstrap/main.go --build-arg ldflags="$(LDFLAGS)" . -t ${BOOTSTRAP_IMG} # Push the docker image -docker-push-bootstrap: +docker-push-bootstrap: ## Push bootstrap docker push ${BOOTSTRAP_IMG} - all-controlplane: manager-controlplane # Run tests -test-controlplane: generate-controlplane fmt vet manifests-controlplane +test-controlplane: $(ENVTEST) generate-controlplane lint manifests-controlplane go test ./... -coverprofile cover.out # Build manager binary -manager-controlplane: generate-controlplane fmt vet +manager-controlplane: generate-controlplane lint CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o bin/manager controlplane/main.go # Run against the configured Kubernetes cluster in ~/.kube/config -run-controlplane: generate-controlplane fmt vet manifests-controlplane +run-controlplane: generate-controlplane lint manifests-controlplane go run ./controlplane/main.go # Install CRDs into a cluster @@ -141,21 +164,64 @@ deploy-controlplane: manifests-controlplane $(KUSTOMIZE) build controlplane/config/default | kubectl apply -f - # Generate manifests e.g. CRD, RBAC etc. -manifests-controlplane: controller-gen +manifests-controlplane: $(KUSTOMIZE) $(CONTROLLER_GEN) $(CONTROLLER_GEN) rbac:roleName=manager-role webhook crd paths="./..." 
output:crd:artifacts:config=controlplane/config/crd/bases output:rbac:dir=bootstrap/config/rbac -release-controlplane: manifests-controlplane +release-controlplane: manifests-controlplane ## Release control-plane mkdir -p out cd controlplane/config/manager && $(KUSTOMIZE) edit set image controller=${CONTROLPLANE_IMG} $(KUSTOMIZE) build controlplane/config/default > out/control-plane-components.yaml -generate-controlplane: controller-gen +generate-controlplane: $(CONTROLLER_GEN) $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." -# Build the docker image -docker-build-controlplane: manager-controlplane +docker-build-controlplane: manager-controlplane ## Build control-plane DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./controlplane/main.go --build-arg ldflags="$(LDFLAGS)" . -t ${CONTROLPLANE_IMG} -# Push the docker image -docker-push-controlplane: +docker-push-controlplane: ## Push control-plane docker push ${CONTROLPLANE_IMG} + +release: release-bootstrap release-controlplane +## -------------------------------------- +## Help +## -------------------------------------- + +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + + +## -------------------------------------- +## Linting +## -------------------------------------- + +.PHONY: lint +lint: $(GOLANGCI_LINT) ## Lint codebase + $(GOLANGCI_LINT) run -v --fast=false + +fmt: + go fmt ./... + + +## -------------------------------------- +## Tooling Binaries +## -------------------------------------- +.PHONY: envtest ## Install envtest +$(ENVTEST): ## Build envtest from tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) sigs.k8s.io/controller-runtime/tools/setup-envtest $(ENVTEST_BIN) $(ENVTEST_VER) + +$(ENVSUBST): ## Build envsubst from tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) github.com/drone/envsubst/v2/cmd/envsubst $(ENVSUBST_BIN) $(ENVSUBST_VER) + +$(GOLANGCI_LINT): ## Build golangci-lint from tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) github.com/golangci/golangci-lint/cmd/golangci-lint $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER) + +## HACK replace with $(GO_INSTALL) once https://github.com/kubernetes-sigs/kustomize/issues/947 is fixed +$(KUSTOMIZE): ## Put kustomize into tools folder. + mkdir -p $(TOOLS_BIN_DIR) + rm -f $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)* + curl -fsSL "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash -s -- $(KUSTOMIZE_VER:v%=%) $(TOOLS_BIN_DIR) + mv "$(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)" $(KUSTOMIZE) + ln -sf $(KUSTOMIZE) "$(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)" + +$(CONTROLLER_GEN): ## Build controller-gen from tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) sigs.k8s.io/controller-tools/cmd/controller-gen $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER) diff --git a/bootstrap/api/v1beta1/condition_consts.go b/bootstrap/api/v1beta1/condition_consts.go index 6249e0d2..5ddc7381 100644 --- a/bootstrap/api/v1beta1/condition_consts.go +++ b/bootstrap/api/v1beta1/condition_consts.go @@ -40,7 +40,7 @@ const ( // // NOTE: Having the control plane machine available is a pre-condition for joining additional control planes // or workers nodes. - // DEPRECATED: This has been deprecated in v1beta1 and will be removed in a future version. 
+ // Deprecated: This has been deprecated in v1beta1 and will be removed in a future version. // Switch to WaitingForControlPlaneAvailableReason constant from the `sigs.k8s.io/cluster-api/api/v1beta1` // package. WaitingForControlPlaneAvailableReason = clusterv1.WaitingForControlPlaneAvailableReason @@ -66,6 +66,6 @@ const ( CertificatesGenerationFailedReason = "CertificatesGenerationFailed" // CertificatesCorruptedReason (Severity=Error) documents a KThreesConfig controller detecting - // an error while while retrieving certificates for a joining node. + // an error while retrieving certificates for a joining node. CertificatesCorruptedReason = "CertificatesCorrupted" ) diff --git a/bootstrap/api/v1beta1/kthreesconfig_types.go b/bootstrap/api/v1beta1/kthreesconfig_types.go index 73ae3d7a..dfb60b0f 100644 --- a/bootstrap/api/v1beta1/kthreesconfig_types.go +++ b/bootstrap/api/v1beta1/kthreesconfig_types.go @@ -68,9 +68,9 @@ type KThreesServerConfig struct { // +optional BindAddress string `json:"bindAddress,omitempty"` - // HttpsListenPort HTTPS listen port (default: 6443) + // HTTPSListenPort HTTPS listen port (default: 6443) // +optional - HttpsListenPort string `json:"httpsListenPort,omitempty"` + HTTPSListenPort string `json:"httpsListenPort,omitempty"` // AdvertiseAddress IP address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip) // +optional diff --git a/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml b/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml index 8535ea2b..e3279dd9 100644 --- a/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml +++ b/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: kthreesconfigs.bootstrap.cluster.x-k8s.io spec: @@ -273,9 +273,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml b/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml index 355c746d..f2d3a6b8 100644 --- a/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml +++ b/bootstrap/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: kthreesconfigtemplates.bootstrap.cluster.x-k8s.io spec: @@ -219,9 +219,3 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/bootstrap/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml b/bootstrap/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml index d91e0c1b..ef7273c5 100644 --- a/bootstrap/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml +++ b/bootstrap/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.8.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: kthreescontrolplanes.controlplane.cluster.x-k8s.io spec: @@ -105,6 +105,7 @@ spec: description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object + x-kubernetes-map-type: atomic kthreesConfigSpec: description: KThreesConfigSpec is a KThreesConfigSpec to use for initializing and joining machines to the control plane. @@ -413,9 +414,3 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/bootstrap/config/manager/kustomization.yaml b/bootstrap/config/manager/kustomization.yaml index ad85de2f..9beeff2d 100644 --- a/bootstrap/config/manager/kustomization.yaml +++ b/bootstrap/config/manager/kustomization.yaml @@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: ghcr.io/zawachte/cluster-api-k3s/bootstrap-controller - newTag: v0.1.5 + newName: ghcr.io/cluster-api-provider-k3s/cluster-api-k3s/bootstrap-controller + newTag: v0.2.0 diff --git a/bootstrap/config/rbac/role.yaml b/bootstrap/config/rbac/role.yaml index 9d3c7384..659bb9dd 100644 --- a/bootstrap/config/rbac/role.yaml +++ b/bootstrap/config/rbac/role.yaml @@ -19,6 +19,14 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch - apiGroups: - bootstrap.cluster.x-k8s.io resources: @@ -43,11 +51,38 @@ rules: - patch - update - watch +- apiGroups: + - bootstrap.cluster.x-k8s.io + resources: + - kubeadmconfigs + - kubeadmconfigs/status + verbs: + - get + - list + - watch - apiGroups: - cluster.x-k8s.io resources: - clusters - clusters/status + verbs: + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - clusters/status + - machines + - machines/status + verbs: + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: - machines - machines/status verbs: @@ -63,3 +98,43 @@ rules: - get - list - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - packetclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - packetclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - packetmachines + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - packetmachines/status + verbs: + - get + - patch + - update diff --git a/bootstrap/controllers/kthreesconfig_controller.go b/bootstrap/controllers/kthreesconfig_controller.go index 3a4e7736..458f03a3 100644 --- a/bootstrap/controllers/kthreesconfig_controller.go +++ b/bootstrap/controllers/kthreesconfig_controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "errors" "fmt" corev1 "k8s.io/api/core/v1" @@ -35,7 +36,6 @@ import ( "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/secret" "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/token" "github.com/go-logr/logr" - "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -75,6 +75,11 @@ type Scope struct { Cluster *clusterv1.Cluster } +var ( + ErrInvalidRef = errors.New("invalid reference") + 
ErrFailedUnlock = errors.New("failed to unlock the k3s init lock") +) + // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kthreesconfigs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kthreesconfigs/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status,verbs=get;list;watch @@ -88,16 +93,16 @@ func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques config := &bootstrapv1.KThreesConfig{} if err := r.Client.Get(ctx, req.NamespacedName, config); err != nil { if apierrors.IsNotFound(err) { - return ctrl.Result{}, nil } + log.Error(err, "Failed to get config") return ctrl.Result{}, err } // Look up the owner of this KubeConfig if there is one configOwner, err := bsutil.GetConfigOwner(ctx, r.Client, config) - if apierrors.IsNotFound(errors.Cause(err)) { + if apierrors.IsNotFound(err) { // Could not find the owner yet, this is not an error and will rereconcile when the owner gets set. return ctrl.Result{}, nil } @@ -114,12 +119,12 @@ func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Lookup the cluster the config owner is associated with cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName()) if err != nil { - if errors.Cause(err) == util.ErrNoCluster { + if errors.Is(err, util.ErrNoCluster) { log.Info(fmt.Sprintf("%s does not belong to a cluster yet, waiting until it's part of a cluster", configOwner.GetKind())) return ctrl.Result{}, nil } - if apierrors.IsNotFound(errors.Cause(err)) { + if apierrors.IsNotFound(err) { log.Info("Cluster does not exist yet, waiting until it is created") return ctrl.Result{}, nil } @@ -209,20 +214,18 @@ func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques // It's a worker join return r.joinWorker(ctx, scope) - } -func (r *KThreesConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (_ ctrl.Result, reterr error) { - +func (r *KThreesConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) { machine := &clusterv1.Machine{} if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind()) + return ctrl.Result{}, fmt.Errorf("cannot convert %s to Machine: %w", scope.ConfigOwner.GetKind(), err) } // injects into config.Version values from top level object r.reconcileTopLevelObjectSettings(scope.Cluster, machine, scope.Config) - serverUrl := fmt.Sprintf("https://%s", scope.Cluster.Spec.ControlPlaneEndpoint.String()) + serverURL := fmt.Sprintf("https://%s", scope.Cluster.Spec.ControlPlaneEndpoint.String()) tokn, err := r.retrieveToken(ctx, scope) if err != nil { @@ -230,7 +233,7 @@ func (r *KThreesConfigReconciler) joinControlplane(ctx context.Context, scope *S return ctrl.Result{}, err } - configStruct := k3s.GenerateJoinControlPlaneConfig(serverUrl, tokn, + configStruct := k3s.GenerateJoinControlPlaneConfig(serverURL, tokn, scope.Cluster.Spec.ControlPlaneEndpoint.Host, scope.Config.Spec.ServerConfig, scope.Config.Spec.AgentConfig) @@ -274,17 +277,16 @@ func (r *KThreesConfigReconciler) joinControlplane(ctx context.Context, scope *S return ctrl.Result{}, nil } -func (r *KThreesConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (_ ctrl.Result, 
reterr error) { - +func (r *KThreesConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) { machine := &clusterv1.Machine{} if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind()) + return ctrl.Result{}, fmt.Errorf("cannot convert %s to Machine: %w", scope.ConfigOwner.GetKind(), err) } // injects into config.Version values from top level object r.reconcileTopLevelObjectSettings(scope.Cluster, machine, scope.Config) - serverUrl := fmt.Sprintf("https://%s", scope.Cluster.Spec.ControlPlaneEndpoint.String()) + serverURL := fmt.Sprintf("https://%s", scope.Cluster.Spec.ControlPlaneEndpoint.String()) tokn, err := r.retrieveToken(ctx, scope) if err != nil { @@ -292,7 +294,7 @@ func (r *KThreesConfigReconciler) joinWorker(ctx context.Context, scope *Scope) return ctrl.Result{}, err } - configStruct := k3s.GenerateWorkerConfig(serverUrl, tokn, scope.Config.Spec.AgentConfig) + configStruct := k3s.GenerateWorkerConfig(serverURL, tokn, scope.Config.Spec.AgentConfig) b, err := kubeyaml.Marshal(configStruct) if err != nil { @@ -345,7 +347,7 @@ func (r *KThreesConfigReconciler) resolveFiles(ctx context.Context, cfg *bootstr if in.ContentFrom != nil { data, err := r.resolveSecretFileContent(ctx, cfg.Namespace, in) if err != nil { - return nil, errors.Wrapf(err, "failed to resolve file source") + return nil, fmt.Errorf("failed to resolve file source: %w", err) } in.ContentFrom = nil in.Content = string(data) @@ -362,13 +364,13 @@ func (r *KThreesConfigReconciler) resolveSecretFileContent(ctx context.Context, key := types.NamespacedName{Namespace: ns, Name: source.ContentFrom.Secret.Name} if err := r.Client.Get(ctx, key, secret); err != nil { if apierrors.IsNotFound(err) { - return nil, errors.Wrapf(err, "secret not found: %s", key) + return nil, fmt.Errorf("secret not found %s: %w", key, err) } - return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key) + return nil, fmt.Errorf("failed to retrieve Secret %q: %w", key, err) } data, ok := secret.Data[source.ContentFrom.Secret.Key] if !ok { - return nil, errors.Errorf("secret references non-existent secret key: %q", source.ContentFrom.Secret.Key) + return nil, fmt.Errorf("secret references non-existent secret key %q: %w", source.ContentFrom.Secret.Key, ErrInvalidRef) } return data, nil } @@ -383,13 +385,12 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex // if it's NOT a control plane machine, requeue if !scope.ConfigOwner.IsControlPlaneMachine() { - return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } machine := &clusterv1.Machine{} if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind()) + return ctrl.Result{}, fmt.Errorf("cannot convert %s to Machine: %w", scope.ConfigOwner.GetKind(), err) } // acquire the init lock so that only the first machine configured @@ -404,7 +405,7 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex defer func() { if reterr != nil { if !r.KThreesInitLock.Unlock(ctx, scope.Cluster) { - reterr = kerrors.NewAggregate([]error{reterr, errors.New("failed to unlock the k3s init lock")}) + reterr = kerrors.NewAggregate([]error{reterr, ErrFailedUnlock}) } } }() @@ -434,7 +435,6 @@ func (r *KThreesConfigReconciler) 
handleClusterNotInitialized(ctx context.Contex // TODO support k3s great feature of external backends. // For now just use the etcd option - configStruct := k3s.GenerateInitControlPlaneConfig( scope.Cluster.Spec.ControlPlaneEndpoint.Host, token, @@ -517,11 +517,11 @@ func (r *KThreesConfigReconciler) generateAndStoreToken(ctx context.Context, sco // it is possible that secret creation happens but the config.Status patches are not applied if err := r.Client.Create(ctx, secret); err != nil { if !apierrors.IsAlreadyExists(err) { - return "", errors.Wrapf(err, "failed to create token for KThreesConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + return "", fmt.Errorf("failed to create token for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) } // r.Log.Info("bootstrap data secret for KThreesConfig already exists, updating", "secret", secret.Name, "KThreesConfig", scope.Config.Name) if err := r.Client.Update(ctx, secret); err != nil { - return "", errors.Wrapf(err, "failed to update bootstrap token secret for KThreesConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + return "", fmt.Errorf("failed to update bootstrap token secret for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) } } @@ -529,7 +529,6 @@ func (r *KThreesConfigReconciler) generateAndStoreToken(ctx context.Context, sco } func (r *KThreesConfigReconciler) retrieveToken(ctx context.Context, scope *Scope) (string, error) { - secret := &corev1.Secret{} obj := client.ObjectKey{ Namespace: scope.Config.Namespace, @@ -537,14 +536,13 @@ func (r *KThreesConfigReconciler) retrieveToken(ctx context.Context, scope *Scop } if err := r.Client.Get(ctx, obj, secret); err != nil { - return "", errors.Wrapf(err, "failed to get token for KThreesConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + return "", fmt.Errorf("failed to get token for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) } return string(secret.Data["value"]), nil } func (r *KThreesConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { - if r.KThreesInitLock == nil { r.KThreesInitLock = locking.NewControlPlaneInitMutex(ctrl.Log.WithName("init-locker"), mgr.GetClient()) } @@ -584,11 +582,11 @@ func (r *KThreesConfigReconciler) storeBootstrapData(ctx context.Context, scope // it is possible that secret creation happens but the config.Status patches are not applied if err := r.Client.Create(ctx, secret); err != nil { if !apierrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create bootstrap data secret for KThreesConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + return fmt.Errorf("failed to create bootstrap data secret for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) } r.Log.Info("bootstrap data secret for KThreesConfig already exists, updating", "secret", secret.Name, "KThreesConfig", scope.Config.Name) if err := r.Client.Update(ctx, secret); err != nil { - return errors.Wrapf(err, "failed to update bootstrap data secret for KThreesConfig %s/%s", scope.Config.Namespace, scope.Config.Name) + return fmt.Errorf("failed to update bootstrap data secret for KThreesConfig %s/%s: %w", scope.Config.Namespace, scope.Config.Name, err) } } @@ -603,22 +601,22 @@ func (r *KThreesConfigReconciler) reconcileKubeconfig(ctx context.Context, scope _, err := secret.Get(ctx, r.Client, util.ObjectKey(scope.Cluster), secret.Kubeconfig) switch { - case apierrors.IsNotFound(errors.Cause(err)): + case apierrors.IsNotFound(err): if err := 
kubeconfig.CreateSecret(ctx, r.Client, scope.Cluster); err != nil { - if err == kubeconfig.ErrDependentCertificateNotFound { + if errors.Is(err, kubeconfig.ErrDependentCertificateNotFound) { logger.Info("could not find secret for cluster, requeuing", "secret", secret.ClusterCA) return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } return ctrl.Result{}, err } case err != nil: - return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve Kubeconfig Secret for Cluster %q in namespace %q", scope.Cluster.Name, scope.Cluster.Namespace) + return ctrl.Result{}, fmt.Errorf("failed to retrieve Kubeconfig Secret for Cluster %q in namespace %q: %w", scope.Cluster.Name, scope.Cluster.Namespace, err) } return ctrl.Result{}, nil } -func (r *KThreesConfigReconciler) reconcileTopLevelObjectSettings(cluster *clusterv1.Cluster, machine *clusterv1.Machine, config *bootstrapv1.KThreesConfig) { +func (r *KThreesConfigReconciler) reconcileTopLevelObjectSettings(_ *clusterv1.Cluster, machine *clusterv1.Machine, config *bootstrapv1.KThreesConfig) { log := r.Log.WithValues("kthreesconfig", fmt.Sprintf("%s/%s", config.Namespace, config.Name)) // If there are no Version settings defined in Config, use Version from machine, if defined diff --git a/controlplane/api/v1beta1/condition_consts.go b/controlplane/api/v1beta1/condition_consts.go index f07db06a..81dac71b 100644 --- a/controlplane/api/v1beta1/condition_consts.go +++ b/controlplane/api/v1beta1/condition_consts.go @@ -27,7 +27,7 @@ const ( const ( // CertificatesAvailableCondition documents that cluster certificates were generated as part of the - // processing of a a KThreesControlPlane object. + // processing of a KThreesControlPlane object. CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KThreesControlPlane controller detecting diff --git a/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml b/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml index 8535ea2b..e3279dd9 100644 --- a/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml +++ b/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: kthreesconfigs.bootstrap.cluster.x-k8s.io spec: @@ -273,9 +273,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml b/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml index 355c746d..f2d3a6b8 100644 --- a/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml +++ b/controlplane/config/crd/bases/bootstrap.cluster.x-k8s.io_kthreesconfigtemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: kthreesconfigtemplates.bootstrap.cluster.x-k8s.io spec: @@ -219,9 +219,3 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git 
a/controlplane/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml b/controlplane/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml index d91e0c1b..ef7273c5 100644 --- a/controlplane/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml +++ b/controlplane/config/crd/bases/controlplane.cluster.x-k8s.io_kthreescontrolplanes.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: kthreescontrolplanes.controlplane.cluster.x-k8s.io spec: @@ -105,6 +105,7 @@ spec: description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string type: object + x-kubernetes-map-type: atomic kthreesConfigSpec: description: KThreesConfigSpec is a KThreesConfigSpec to use for initializing and joining machines to the control plane. @@ -413,9 +414,3 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/controlplane/config/manager/kustomization.yaml b/controlplane/config/manager/kustomization.yaml index 1dfc6205..2da61e4c 100644 --- a/controlplane/config/manager/kustomization.yaml +++ b/controlplane/config/manager/kustomization.yaml @@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: ghcr.io/zawachte/cluster-api-k3s/controlplane-controller - newTag: v0.1.5 + newName: ghcr.io/cluster-api-provider-k3s/cluster-api-k3s/controlplane-controller + newTag: v0.2.0 diff --git a/controlplane/controllers/const.go b/controlplane/controllers/const.go index 798207f4..1d9a7fa6 100644 --- a/controlplane/controllers/const.go +++ b/controlplane/controllers/const.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/controlplane/controllers/kthreescontrolplane_controller.go b/controlplane/controllers/kthreescontrolplane_controller.go index 8dbbbadd..73e69053 100644 --- a/controlplane/controllers/kthreescontrolplane_controller.go +++ b/controlplane/controllers/kthreescontrolplane_controller.go @@ -18,24 +18,19 @@ package controllers import ( "context" + "errors" "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "strings" "time" - corev1 "k8s.io/api/core/v1" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - - kerrors "k8s.io/apimachinery/pkg/util/errors" - "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/kubeconfig" "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/secret" "github.com/go-logr/logr" - "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" @@ -71,7 +66,7 @@ type KThreesControlPlaneReconciler struct { managementClusterUncached k3s.ManagementCluster } -func (r *KThreesControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) { +func (r *KThreesControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := r.Log.WithValues("namespace", req.Namespace, "kthreesControlPlane", req.Name) // Fetch the KThreesControlPlane instance. @@ -120,7 +115,7 @@ func (r *KThreesControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. // because the main defer may take too much time to get cluster status // Patch ObservedGeneration only if the reconciliation completed successfully patchOpts := []patch.Option{} - if reterr == nil { + if err == nil { patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) } if err := patchHelper.Patch(ctx, kcp, patchOpts...); err != nil { @@ -131,42 +126,42 @@ func (r *KThreesControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{}, nil } - defer func() { - - // Always attempt to update status. - if err := r.updateStatus(ctx, kcp, cluster); err != nil { - var connFailure *k3s.RemoteClusterConnectionError - if errors.As(err, &connFailure) { - logger.Info("Could not connect to workload cluster to fetch status", "err", err.Error()) - } else { - logger.Error(err, "Failed to update KThreesControlPlane Status") - reterr = kerrors.NewAggregate([]error{reterr, err}) - } + var res ctrl.Result + if !kcp.ObjectMeta.DeletionTimestamp.IsZero() { + // Handle deletion reconciliation loop. + res, err = r.reconcileDelete(ctx, cluster, kcp) + } else { + // Handle normal reconciliation loop. + res, err = r.reconcile(ctx, cluster, kcp) + } + + // Always attempt to update status. + if updateErr := r.updateStatus(ctx, kcp, cluster); updateErr != nil { + var connFailure *k3s.RemoteClusterConnectionError + if errors.As(updateErr, &connFailure) { + logger.Info("Could not connect to workload cluster to fetch status", "err", updateErr.Error()) + } else { + logger.Error(updateErr, "Failed to update KThreesControlPlane Status") + err = kerrors.NewAggregate([]error{err, updateErr}) + } } - // Always attempt to Patch the KThreesControlPlane object and status after each reconciliation.
- if err := patchKThreesControlPlane(ctx, patchHelper, kcp); err != nil { - logger.Error(err, "Failed to patch KThreesControlPlane") - reterr = kerrors.NewAggregate([]error{reterr, err}) - } + // Always attempt to Patch the KThreesControlPlane object and status after each reconciliation. + if patchErr := patchKThreesControlPlane(ctx, patchHelper, kcp); patchErr != nil { + logger.Error(patchErr, "Failed to patch KThreesControlPlane") + err = kerrors.NewAggregate([]error{err, patchErr}) + } - // TODO: remove this as soon as we have a proper remote cluster cache in place. - // Make KCP to requeue in case status is not ready, so we can check for node status without waiting for a full resync (by default 10 minutes). - // Only requeue if we are not going in exponential backoff due to error, or if we are not already re-queueing, or if the object has a deletion timestamp. - if reterr == nil && !res.Requeue && !(res.RequeueAfter > 0) && kcp.ObjectMeta.DeletionTimestamp.IsZero() { - if !kcp.Status.Ready { - res = ctrl.Result{RequeueAfter: 20 * time.Second} - } + // TODO: remove this as soon as we have a proper remote cluster cache in place. + // Make KCP to requeue in case status is not ready, so we can check for node status without waiting for a full resync (by default 10 minutes). + // Only requeue if we are not going in exponential backoff due to error, or if we are not already re-queueing, or if the object has a deletion timestamp. + if err == nil && !res.Requeue && !(res.RequeueAfter > 0) && kcp.ObjectMeta.DeletionTimestamp.IsZero() { + if !kcp.Status.Ready { + res = ctrl.Result{RequeueAfter: 20 * time.Second} } - }() - - if !kcp.ObjectMeta.DeletionTimestamp.IsZero() { - // Handle deletion reconciliation loop. - return r.reconcileDelete(ctx, cluster, kcp) } - // Handle normal reconciliation loop. - return r.reconcile(ctx, cluster, kcp) + return res, err } // reconcileDelete handles KThreesControlPlane deletion. @@ -263,7 +258,6 @@ func patchKThreesControlPlane(ctx context.Context, patchHelper *patch.Helper, kc } func (r *KThreesControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error { - c, err := ctrl.NewControllerManagedBy(mgr). For(&controlplanev1.KThreesControlPlane{}). Owns(&clusterv1.Machine{}). @@ -271,7 +265,7 @@ func (r *KThreesControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error // WithEventFilter(predicates.ResourceNotPaused(r.Log)).
Build(r) if err != nil { - return errors.Wrap(err, "failed setting up with a controller manager") + return fmt.Errorf("failed setting up with a controller manager: %w", err) } err = c.Watch( @@ -280,7 +274,7 @@ func (r *KThreesControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error predicates.ClusterUnpausedAndInfrastructureReady(r.Log), ) if err != nil { - return errors.Wrap(err, "failed adding Watch for Clusters to controller manager") + return fmt.Errorf("failed adding Watch for Clusters to controller manager: %w", err) } r.Scheme = mgr.GetScheme() @@ -290,6 +284,7 @@ func (r *KThreesControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error if r.managementCluster == nil { r.managementCluster = &k3s.Management{Client: r.Client} } + if r.managementClusterUncached == nil { r.managementClusterUncached = &k3s.Management{Client: mgr.GetAPIReader()} } @@ -324,7 +319,7 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c ownedMachines, err := r.managementCluster.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.OwnedMachines(kcp)) if err != nil { - return errors.Wrap(err, "failed to get list of owned machines") + return fmt.Errorf("failed to get list of owned machines: %w", err) } logger := r.Log.WithValues("namespace", kcp.Namespace, "KThreesControlPlane", kcp.Name, "cluster", cluster.Name) @@ -368,7 +363,7 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster)) if err != nil { - return errors.Wrap(err, "failed to create remote cluster client") + return fmt.Errorf("failed to create remote cluster client: %w", err) } status, err := workloadCluster.ClusterStatus(ctx) if err != nil { @@ -390,7 +385,7 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c } // reconcile handles KThreesControlPlane reconciliation. -func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane) (res ctrl.Result, reterr error) { +func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane) (ctrl.Result, error) { logger := r.Log.WithValues("namespace", kcp.Namespace, "KThreesControlPlane", kcp.Name, "cluster", cluster.Name) logger.Info("Reconcile KThreesControlPlane") @@ -429,7 +424,7 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * adoptableMachines := controlPlaneMachines.Filter(machinefilters.AdoptableControlPlaneMachines(cluster.Name)) if len(adoptableMachines) > 0 { // We adopt the Machines and then wait for the update event for the ownership reference to re-queue them so the cache is up-to-date - //err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster) + // err = r.adoptMachines(ctx, kcp, adoptableMachines, cluster) return ctrl.Result{}, err } @@ -457,15 +452,15 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * // Ensures the number of etcd members is in sync with the number of machines/nodes. // NOTE: This is usually required after a machine deletion. 
- //if result, err := r.reconcileEtcdMembers(ctx, controlPlane); err != nil || !result.IsZero() { + // if result, err := r.reconcileEtcdMembers(ctx, controlPlane); err != nil || !result.IsZero() { // return result, err - //} + // } // Reconcile unhealthy machines by triggering deletion and requeue if it is considered safe to remediate, // otherwise continue with the other KCP operations. - //if result, err := r.reconcileUnhealthyMachines(ctx, controlPlane); err != nil || !result.IsZero() { + // if result, err := r.reconcileUnhealthyMachines(ctx, controlPlane); err != nil || !result.IsZero() { // return result, err - //} + // } // Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations. needRollout := controlPlane.MachinesNeedingRollout() @@ -522,7 +517,7 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * // Update CoreDNS deployment. if err := workloadCluster.UpdateCoreDNS(ctx, kcp); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update CoreDNS deployment") + return ctrl.Result{}, fmt.Errorf("failed to update CoreDNS deployment") } **/ @@ -564,7 +559,7 @@ func (r *KThreesControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, controllerOwnerRef := *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KThreesControlPlane")) configSecret, err := secret.GetFromNamespacedName(ctx, r.Client, clusterName, secret.Kubeconfig) switch { - case apierrors.IsNotFound(errors.Cause(err)): + case apierrors.IsNotFound(err): createErr := kubeconfig.CreateSecretWithOwner( ctx, r.Client, @@ -579,7 +574,7 @@ func (r *KThreesControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, return ctrl.Result{}, createErr case err != nil: - return ctrl.Result{}, errors.Wrap(err, "failed to retrieve kubeconfig Secret") + return ctrl.Result{}, fmt.Errorf("failed to retrieve kubeconfig Secret: %w", err) } // only do rotation on owned secrets @@ -597,7 +592,7 @@ func (r *KThreesControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, if needsRotation { r.Log.Info("rotating kubeconfig secret") if err := kubeconfig.RegenerateSecret(ctx, r.Client, configSecret); err != nil { - return errors.Wrap(err, "failed to regenerate kubeconfig") + return fmt.Errorf("failed to regenerate kubeconfig") } } **/ @@ -616,7 +611,7 @@ func (r *KThreesControlPlaneReconciler) reconcileControlPlaneConditions(ctx cont workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(controlPlane.Cluster)) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "cannot get remote client to workload cluster") + return ctrl.Result{}, fmt.Errorf("cannot get remote client to workload cluster: %w", err) } // Update conditions status @@ -652,26 +647,26 @@ func (r *KThreesControlPlaneReconciler) upgradeControlPlane( /** parsedVersion, err := semver.ParseTolerant(kcp.Spec.Version) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) + return ctrl.Result{}, fmt.Errorf(err, "failed to parse kubernetes version %q", kcp.Spec.Version) } if kcp.Spec.KThreesConfigSpec.ClusterConfiguration != nil { imageRepository := kcp.Spec.KThreesConfigSpec.ClusterConfiguration.ImageRepository if err := workloadCluster.UpdateImageRepositoryInKubeadmConfigMap(ctx, imageRepository); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update the image repository in the kubeadm config map") + return ctrl.Result{}, fmt.Errorf("failed to update 
the image repository in the kubeadm config map") } } if kcp.Spec.KThreesConfigSpec.ClusterConfiguration != nil && kcp.Spec.KThreesConfigSpec.ClusterConfiguration.Etcd.Local != nil { meta := kcp.Spec.KThreesConfigSpec.ClusterConfiguration.Etcd.Local.ImageMeta if err := workloadCluster.UpdateEtcdVersionInKubeadmConfigMap(ctx, meta.ImageRepository, meta.ImageTag); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update the etcd version in the kubeadm config map") + return ctrl.Result{}, fmt.Errorf("failed to update the etcd version in the kubeadm config map") } } if err := workloadCluster.UpdateKubeletConfigMap(ctx, parsedVersion); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to upgrade kubelet config map") + return ctrl.Result{}, fmt.Errorf("failed to upgrade kubelet config map") } **/ diff --git a/controlplane/controllers/scale.go b/controlplane/controllers/scale.go index 221c2eb6..b7ce8ab6 100644 --- a/controlplane/controllers/scale.go +++ b/controlplane/controllers/scale.go @@ -19,11 +19,12 @@ package controllers import ( "context" "encoding/json" + "errors" + "fmt" "strings" bootstrapv1 "github.com/cluster-api-provider-k3s/cluster-api-k3s/bootstrap/api/v1beta1" controlplanev1 "github.com/cluster-api-provider-k3s/cluster-api-k3s/controlplane/api/v1beta1" - "github.com/pkg/errors" k3s "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/k3s" "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/machinefilters" @@ -40,6 +41,8 @@ import ( ctrl "sigs.k8s.io/controller-runtime" ) +var ErrPreConditionFailed = errors.New("precondition check failed") + func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, controlPlane *k3s.ControlPlane) (ctrl.Result, error) { logger := controlPlane.Logger() @@ -51,9 +54,9 @@ func (r *KThreesControlPlaneReconciler) initializeControlPlane(ctx context.Conte return ctrl.Result{}, err } if len(ownedMachines) > 0 { - return ctrl.Result{}, errors.Errorf( - "control plane has already been initialized, found %d owned machine for cluster %s/%s: controller cache or management cluster is misbehaving", - len(ownedMachines), cluster.Namespace, cluster.Name, + return ctrl.Result{}, fmt.Errorf( + "control plane has already been initialized, found %d owned machine for cluster %s/%s: controller cache or management cluster is misbehaving. %w", + len(ownedMachines), cluster.Namespace, cluster.Name, ErrPreConditionFailed, ) } @@ -102,7 +105,7 @@ func (r *KThreesControlPlaneReconciler) scaleDownControlPlane( // Pick the Machine that we should scale down. machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down") + return ctrl.Result{}, fmt.Errorf("failed to select machine for scale down: %w", err) } // Run preflight checks ensuring the control plane is stable before proceeding with a scale up/scale down operation; if not, wait.
@@ -111,15 +114,15 @@ func (r *KThreesControlPlaneReconciler) scaleDownControlPlane( return result, err } - //workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster)) - //if err != nil { + // workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster)) + // if err != nil { // logger.Error(err, "Failed to create client to workload cluster") - // return ctrl.Result{}, errors.Wrapf(err, "failed to create client to workload cluster") - //} + // return ctrl.Result{}, fmt.Errorf(err, "failed to create client to workload cluster") + // } if machineToDelete == nil { logger.Info("Failed to pick control plane Machine to delete") - return ctrl.Result{}, errors.New("failed to pick control plane Machine to delete") + return ctrl.Result{}, fmt.Errorf("failed to pick control plane Machine to delete: %w", err) } // TODO figure out etcd complexities @@ -175,13 +178,10 @@ func (r *KThreesControlPlaneReconciler) preflightChecks(_ context.Context, contr // Check machine health conditions; if there are conditions with False or Unknown, then wait. allMachineHealthConditions := []clusterv1.ConditionType{controlplanev1.MachineAgentHealthyCondition} - if controlPlane.IsEtcdManaged() { - } machineErrors := []error{} loopmachines: for _, machine := range controlPlane.Machines { - for _, excluded := range excludeFor { // If this machine should be excluded from the individual // health check, continue the out loop. @@ -196,6 +196,7 @@ loopmachines: } } } + if len(machineErrors) > 0 { aggregatedError := kerrors.NewAggregate(machineErrors) r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "ControlPlaneUnhealthy", @@ -209,17 +210,17 @@ loopmachines: } func preflightCheckCondition(kind string, obj conditions.Getter, condition clusterv1.ConditionType) error { - c := conditions.Get(obj, condition) if c == nil { - return errors.Errorf("%s %s does not have %s condition", kind, obj.GetName(), condition) + return fmt.Errorf("%s %s does not have %s condition: %w", kind, obj.GetName(), condition, ErrPreConditionFailed) } if c.Status == corev1.ConditionFalse { - return errors.Errorf("%s %s reports %s condition is false (%s, %s)", kind, obj.GetName(), condition, c.Severity, c.Message) + return fmt.Errorf("%s %s reports %s condition is false (%s, %s): %w", kind, obj.GetName(), condition, c.Severity, c.Message, ErrPreConditionFailed) } if c.Status == corev1.ConditionUnknown { - return errors.Errorf("%s %s reports %s condition is unknown (%s)", kind, obj.GetName(), condition, c.Message) + return fmt.Errorf("%s %s reports %s condition is unknown (%s): %w", kind, obj.GetName(), condition, c.Message, ErrPreConditionFailed) } + return nil } @@ -259,26 +260,26 @@ func (r *KThreesControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte }) if err != nil { // Safe to return early here since no resources have been created yet. 
- return errors.Wrap(err, "failed to clone infrastructure template") + return fmt.Errorf("failed to clone infrastructure template: %w", err) } // Clone the bootstrap configuration bootstrapRef, err := r.generateKThreesConfig(ctx, kcp, cluster, bootstrapSpec) if err != nil { - errs = append(errs, errors.Wrap(err, "failed to generate bootstrap config")) + errs = append(errs, fmt.Errorf("failed to generate bootstrap config: %w", err)) } // Only proceed to generating the Machine if we haven't encountered an error if len(errs) == 0 { if err := r.generateMachine(ctx, kcp, cluster, infraRef, bootstrapRef, failureDomain); err != nil { - errs = append(errs, errors.Wrap(err, "failed to create Machine")) + errs = append(errs, fmt.Errorf("failed to create Machine: %w", err)) } } // If we encountered any errors, attempt to clean up any dangling resources if len(errs) > 0 { if err := r.cleanupFromGeneration(ctx, infraRef, bootstrapRef); err != nil { - errs = append(errs, errors.Wrap(err, "failed to cleanup generated resources")) + errs = append(errs, fmt.Errorf("failed to cleanup generated resources: %w", err)) } return kerrors.NewAggregate(errs) @@ -299,7 +300,7 @@ func (r *KThreesControlPlaneReconciler) cleanupFromGeneration(ctx context.Contex config.SetName(ref.Name) if err := r.Client.Delete(ctx, config); err != nil && !apierrors.IsNotFound(err) { - errs = append(errs, errors.Wrap(err, "failed to cleanup generated resources after error")) + errs = append(errs, fmt.Errorf("failed to cleanup generated resources after error: %w", err)) } } } @@ -327,7 +328,7 @@ func (r *KThreesControlPlaneReconciler) generateKThreesConfig(ctx context.Contex } if err := r.Client.Create(ctx, bootstrapConfig); err != nil { - return nil, errors.Wrap(err, "Failed to create bootstrap configuration") + return nil, fmt.Errorf("failed to create bootstrap configuration: %w", err) } bootstrapRef := &corev1.ObjectReference{ @@ -367,12 +368,12 @@ func (r *KThreesControlPlaneReconciler) generateMachine(ctx context.Context, kcp // We store ClusterConfiguration as annotation here to detect any changes in KCP ClusterConfiguration and rollout the machine if any. 
serverConfig, err := json.Marshal(kcp.Spec.KThreesConfigSpec.ServerConfig) if err != nil { - return errors.Wrap(err, "failed to marshal cluster configuration") + return fmt.Errorf("failed to marshal cluster configuration: %w", err) } machine.SetAnnotations(map[string]string{controlplanev1.KThreesServerConfigurationAnnotation: string(serverConfig)}) if err := r.Client.Create(ctx, machine); err != nil { - return errors.Wrap(err, "failed to create machine") + return fmt.Errorf("failed to create machine: %w", err) } return nil } diff --git a/controlplane/controllers/suite_test.go b/controlplane/controllers/suite_test.go index fe04cbad..e3881cab 100644 --- a/controlplane/controllers/suite_test.go +++ b/controlplane/controllers/suite_test.go @@ -48,8 +48,6 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func(done Done) { - //logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, diff --git a/pkg/cloudinit/cloudinit.go b/pkg/cloudinit/cloudinit.go index 55a45fd5..fe7c26da 100644 --- a/pkg/cloudinit/cloudinit.go +++ b/pkg/cloudinit/cloudinit.go @@ -18,11 +18,11 @@ package cloudinit import ( "bytes" + "fmt" "strings" "text/template" bootstrapv1 "github.com/cluster-api-provider-k3s/cluster-api-k3s/bootstrap/api/v1beta1" - "github.com/pkg/errors" ) var ( @@ -82,39 +82,24 @@ type BaseUserData struct { K3sVersion string } -func (input *BaseUserData) prepare() error { - input.Header = cloudConfigHeader - input.WriteFiles = append(input.WriteFiles, input.AdditionalFiles...) - - /** - scriptFile, err := generateBootstrapScript(input) - if err != nil { - return errors.Wrap(err, "failed to generate user data for machine joining control plane") - } - input.WriteFiles = append(input.WriteFiles, *scriptFile) - **/ - - return nil -} - func generate(kind string, tpl string, data interface{}) ([]byte, error) { tm := template.New(kind).Funcs(defaultTemplateFuncMap) if _, err := tm.Parse(filesTemplate); err != nil { - return nil, errors.Wrap(err, "failed to parse files template") + return nil, fmt.Errorf("failed to parse files template: %w", err) } if _, err := tm.Parse(commandsTemplate); err != nil { - return nil, errors.Wrap(err, "failed to parse commands template") + return nil, fmt.Errorf("failed to parse commands template: %w", err) } t, err := tm.Parse(tpl) if err != nil { - return nil, errors.Wrapf(err, "failed to parse %s template", kind) + return nil, fmt.Errorf("failed to parse %s template: %w", kind, err) } var out bytes.Buffer if err := t.Execute(&out, data); err != nil { - return nil, errors.Wrapf(err, "failed to generate %s template", kind) + return nil, fmt.Errorf("failed to generate %s template: %w", kind, err) } return out.Bytes(), nil diff --git a/pkg/cloudinit/controlplane_init_test.go b/pkg/cloudinit/controlplane_init_test.go index 54fe7b30..15fad6a0 100644 --- a/pkg/cloudinit/controlplane_init_test.go +++ b/pkg/cloudinit/controlplane_init_test.go @@ -25,7 +25,6 @@ import ( ) func TestControlPlaneInit(t *testing.T) { - g := NewWithT(t) cpinput := &ControlPlaneInput{ diff --git a/pkg/k3s/config.go b/pkg/k3s/config.go index 970e0d40..ae365d33 100644 --- a/pkg/k3s/config.go +++ b/pkg/k3s/config.go @@ -15,7 +15,7 @@ type K3sServerConfig struct { KubeControllerManagerArgs []string `json:"kube-controller-manager-arg,omitempty"` TLSSan []string `json:"tls-san,omitempty"` BindAddress string `json:"bind-address,omitempty"` - HttpsListenPort string 
`json:"https-listen-port,omitempty"` + HTTPSListenPort string `json:"https-listen-port,omitempty"` AdvertiseAddress string `json:"advertise-address,omitempty"` AdvertisePort string `json:"advertise-port,omitempty"` ClusterCidr string `json:"cluster-cidr,omitempty"` @@ -42,11 +42,11 @@ func GenerateInitControlPlaneConfig(controlPlaneEndpoint string, token string, s k3sServerConfig := K3sServerConfig{ DisableCloudController: true, ClusterInit: true, - KubeAPIServerArgs: append(serverConfig.KubeAPIServerArgs, "anonymous-auth=true", getTlsCipherSuiteArg()), + KubeAPIServerArgs: append(serverConfig.KubeAPIServerArgs, "anonymous-auth=true", getTLSCipherSuiteArg()), TLSSan: append(serverConfig.TLSSan, controlPlaneEndpoint), KubeControllerManagerArgs: append(serverConfig.KubeControllerManagerArgs, "cloud-provider=external"), BindAddress: serverConfig.BindAddress, - HttpsListenPort: serverConfig.HttpsListenPort, + HTTPSListenPort: serverConfig.HTTPSListenPort, AdvertiseAddress: serverConfig.AdvertiseAddress, AdvertisePort: serverConfig.AdvertisePort, ClusterCidr: serverConfig.ClusterCidr, @@ -69,15 +69,14 @@ func GenerateInitControlPlaneConfig(controlPlaneEndpoint string, token string, s return k3sServerConfig } -func GenerateJoinControlPlaneConfig(serverUrl string, token string, controlplaneendpoint string, serverConfig bootstrapv1.KThreesServerConfig, agentConfig bootstrapv1.KThreesAgentConfig) K3sServerConfig { - +func GenerateJoinControlPlaneConfig(serverURL string, token string, controlplaneendpoint string, serverConfig bootstrapv1.KThreesServerConfig, agentConfig bootstrapv1.KThreesAgentConfig) K3sServerConfig { k3sServerConfig := K3sServerConfig{ DisableCloudController: true, - KubeAPIServerArgs: append(serverConfig.KubeAPIServerArgs, "anonymous-auth=true", getTlsCipherSuiteArg()), + KubeAPIServerArgs: append(serverConfig.KubeAPIServerArgs, "anonymous-auth=true", getTLSCipherSuiteArg()), TLSSan: append(serverConfig.TLSSan, controlplaneendpoint), KubeControllerManagerArgs: append(serverConfig.KubeControllerManagerArgs, "cloud-provider=external"), BindAddress: serverConfig.BindAddress, - HttpsListenPort: serverConfig.HttpsListenPort, + HTTPSListenPort: serverConfig.HTTPSListenPort, AdvertiseAddress: serverConfig.AdvertiseAddress, AdvertisePort: serverConfig.AdvertisePort, ClusterCidr: serverConfig.ClusterCidr, @@ -89,7 +88,7 @@ func GenerateJoinControlPlaneConfig(serverUrl string, token string, controlplane k3sServerConfig.K3sAgentConfig = K3sAgentConfig{ Token: token, - Server: serverUrl, + Server: serverURL, KubeletArgs: append(agentConfig.KubeletArgs, "cloud-provider=external"), NodeLabels: agentConfig.NodeLabels, NodeTaints: agentConfig.NodeTaints, @@ -101,9 +100,9 @@ func GenerateJoinControlPlaneConfig(serverUrl string, token string, controlplane return k3sServerConfig } -func GenerateWorkerConfig(serverUrl string, token string, agentConfig bootstrapv1.KThreesAgentConfig) K3sAgentConfig { +func GenerateWorkerConfig(serverURL string, token string, agentConfig bootstrapv1.KThreesAgentConfig) K3sAgentConfig { return K3sAgentConfig{ - Server: serverUrl, + Server: serverURL, Token: token, KubeletArgs: append(agentConfig.KubeletArgs, "cloud-provider=external"), NodeLabels: agentConfig.NodeLabels, @@ -114,8 +113,7 @@ func GenerateWorkerConfig(serverUrl string, token string, agentConfig bootstrapv } } -func getTlsCipherSuiteArg() string { - +func getTLSCipherSuiteArg() string { /** Can't use this method because k3s is using older apiserver pkgs that hardcode a subset of ciphers. 
https://github.com/k3s-io/k3s/blob/master/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go#L29 diff --git a/pkg/k3s/control_plane.go b/pkg/k3s/control_plane.go index ddf8edf7..83da9095 100644 --- a/pkg/k3s/control_plane.go +++ b/pkg/k3s/control_plane.go @@ -18,10 +18,11 @@ package k3s import ( "context" + "errors" + "fmt" "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/machinefilters" "github.com/go-logr/logr" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,6 +40,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +var ( + ErrFailedToPickForDeletion = errors.New("failed to pick machine to mark for deletion") + ErrFailedToCreatePatchHelper = errors.New("failed to create patch for machine") +) + // ControlPlane holds business logic around control planes. // It should never need to connect to a service, that responsibility lies outside of this struct. // Going forward we should be trying to add more logic to here and reduce the amount of logic in the reconciler. @@ -71,7 +77,7 @@ func NewControlPlane(ctx context.Context, client client.Client, cluster *cluster for _, machine := range ownedMachines { patchHelper, err := patch.NewHelper(machine, client) if err != nil { - return nil, errors.Wrapf(err, "failed to create patch helper for machine %s", machine.Name) + return nil, fmt.Errorf("machine: %s, %w", machine.Name, ErrFailedToCreatePatchHelper) } patchHelpers[machine.Name] = patchHelper } @@ -131,7 +137,7 @@ func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines Filterabl machinesInFailureDomain := machines.Filter(machinefilters.InFailureDomains(fd)) machineToMark := machinesInFailureDomain.Oldest() if machineToMark == nil { - return nil, errors.New("failed to pick control plane Machine to mark for deletion") + return nil, ErrFailedToPickForDeletion } return machineToMark, nil } @@ -139,7 +145,7 @@ func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines Filterabl // MachineWithDeleteAnnotation returns a machine that has been annotated with DeleteMachineAnnotation key. func (c *ControlPlane) MachineWithDeleteAnnotation(machines FilterableMachineCollection) FilterableMachineCollection { // See if there are any machines with DeleteMachineAnnotation key. - //annotatedMachines := machines.Filter(machinefilters.HasAnnotationKey(clusterv1.DeleteMachineAnnotation)) + // annotatedMachines := machines.Filter(machinefilters.HasAnnotationKey(clusterv1.DeleteMachineAnnotation)) // If there are, return list of annotated machines. 
return nil } @@ -274,10 +280,10 @@ func getInfraResources(ctx context.Context, cl client.Client, machines Filterabl for _, m := range machines { infraObj, err := external.Get(ctx, cl, &m.Spec.InfrastructureRef, m.Namespace) if err != nil { - if apierrors.IsNotFound(errors.Cause(err)) { + if apierrors.IsNotFound(err) { continue } - return nil, errors.Wrapf(err, "failed to retrieve infra obj for machine %q", m.Name) + return nil, fmt.Errorf("failed to retrieve infra obj for machine %q, %w", m.Name, err) } result[m.Name] = infraObj } @@ -294,10 +300,10 @@ func getKThreesConfigs(ctx context.Context, cl client.Client, machines Filterabl } machineConfig := &bootstrapv1.KThreesConfig{} if err := cl.Get(ctx, client.ObjectKey{Name: bootstrapRef.Name, Namespace: m.Namespace}, machineConfig); err != nil { - if apierrors.IsNotFound(errors.Cause(err)) { + if apierrors.IsNotFound(err) { continue } - return nil, errors.Wrapf(err, "failed to retrieve bootstrap config for machine %q", m.Name) + return nil, fmt.Errorf("failed to retrieve bootstrap config for machine %q: %w", m.Name, err) } result[m.Name] = machineConfig } @@ -333,11 +339,12 @@ func (c *ControlPlane) PatchMachines(ctx context.Context) error { controlplanev1.MachineAgentHealthyCondition, controlplanev1.MachineEtcdMemberHealthyCondition, }}); err != nil { - errList = append(errList, errors.Wrapf(err, "failed to patch machine %s", machine.Name)) + errList = append(errList, fmt.Errorf("failed to patch machine %s: %w", machine.Name, err)) } continue } - errList = append(errList, errors.Errorf("failed to get patch helper for machine %s", machine.Name)) + errList = append(errList, fmt.Errorf("machine: %s, %w", machine.Name, ErrFailedToCreatePatchHelper)) } + return kerrors.NewAggregate(errList) } diff --git a/pkg/k3s/management_cluster.go b/pkg/k3s/management_cluster.go index 3edb762a..86a3f583 100644 --- a/pkg/k3s/management_cluster.go +++ b/pkg/k3s/management_cluster.go @@ -2,9 +2,9 @@ package k3s import ( "context" + "fmt" "time" - "github.com/pkg/errors" "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" @@ -12,20 +12,21 @@ import ( "github.com/cluster-api-provider-k3s/cluster-api-k3s/pkg/machinefilters" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) // ManagementCluster defines all behaviors necessary for something to function as a management cluster. type ManagementCluster interface { - ctrlclient.Reader + client.Reader GetMachinesForCluster(ctx context.Context, cluster client.ObjectKey, filters ...machinefilters.Func) (FilterableMachineCollection, error) - GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) + GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (*Workload, error) } // Management holds operations on the management cluster. 
type Management struct { - Client ctrlclient.Reader + ManagementCluster + + Client client.Reader } // RemoteClusterConnectionError represents a failure to connect to a remote cluster @@ -38,12 +39,12 @@ func (e *RemoteClusterConnectionError) Error() string { return e.Name + ": " + e func (e *RemoteClusterConnectionError) Unwrap() error { return e.Err } // Get implements ctrlclient.Reader -func (m *Management) Get(ctx context.Context, key ctrlclient.ObjectKey, obj client.Object) error { +func (m *Management) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { return m.Client.Get(ctx, key, obj) } // List implements ctrlclient.Reader -func (m *Management) List(ctx context.Context, list client.ObjectList, opts ...ctrlclient.ListOption) error { +func (m *Management) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return m.Client.List(ctx, list, opts...) } @@ -55,7 +56,7 @@ func (m *Management) GetMachinesForCluster(ctx context.Context, cluster client.O } ml := &clusterv1.MachineList{} if err := m.Client.List(ctx, ml, client.InNamespace(cluster.Namespace), client.MatchingLabels(selector)); err != nil { - return nil, errors.Wrap(err, "failed to list machines") + return nil, fmt.Errorf("failed to list machines: %w", err) } machines := NewFilterableMachineCollectionFromMachineList(ml) @@ -69,7 +70,7 @@ const ( // GetWorkloadCluster builds a cluster object. // The cluster comes with an etcd client generator to connect to any etcd pod living on a managed machine. -func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) { +func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (*Workload, error) { restConfig, err := remote.RESTConfig(ctx, KThreesControlPlaneControllerName, m.Client, clusterKey) if err != nil { return nil, err diff --git a/pkg/k3s/workload_cluster.go b/pkg/k3s/workload_cluster.go index 762d2091..5a9b5eb7 100644 --- a/pkg/k3s/workload_cluster.go +++ b/pkg/k3s/workload_cluster.go @@ -2,11 +2,11 @@ package k3s import ( "context" + "errors" "fmt" "strings" controlplanev1 "github.com/cluster-api-provider-k3s/cluster-api-k3s/controlplane/api/v1beta1" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,9 +47,11 @@ type WorkloadCluster interface { // Workload defines operations on workload clusters. type Workload struct { + WorkloadCluster + Client ctrlclient.Client CoreDNSMigrator coreDNSMigrator - //etcdClientGenerator etcdClientFor + // etcdClientGenerator etcdClientFor } // ClusterStatus holds stats information about the cluster. @@ -202,7 +204,6 @@ func (w *Workload) UpdateAgentConditions(ctx context.Context, controlPlane *Cont conditions.MarkTrue(machine, controlplanev1.MachineAgentHealthyCondition) } } - } // If there are provisioned machines without corresponding nodes, report this as a failing conditions with SeverityError. 
@@ -363,5 +364,4 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) } - } diff --git a/pkg/k3s/workload_cluster_coredns.go b/pkg/k3s/workload_cluster_coredns.go index 8005c9d3..03fee446 100644 --- a/pkg/k3s/workload_cluster_coredns.go +++ b/pkg/k3s/workload_cluster_coredns.go @@ -18,18 +18,17 @@ package k3s import ( "context" + "errors" "fmt" controlplanev1 "github.com/cluster-api-provider-k3s/cluster-api-k3s/controlplane/api/v1beta1" "github.com/coredns/corefile-migration/migration" - "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/version" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -63,6 +62,8 @@ type coreDNSInfo struct { ToImage string } +var ErrInvalidCoreDNSVersion = errors.New("invalid CoreDNS version given") + // UpdateCoreDNS updates the coredns corefile and coredns // deployment. func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KThreesControlPlane) error { @@ -101,7 +102,7 @@ func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KThree // Validate the image tag. if err := validateCoreDNSImageTag(info.FromImageTag, info.ToImageTag); err != nil { - return errors.Wrapf(err, "failed to validate CoreDNS") + return fmt.Errorf("failed to validate CoreDNS: %w", err) } // Perform the upgrade. @@ -112,19 +113,19 @@ func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.KThree return err } if err := w.updateCoreDNSDeployment(ctx, info); err != nil { - return errors.Wrap(err, "unable to update coredns deployment") + return fmt.Errorf("unable to update coredns deployment: %w", err) } **/ return nil } -func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.ObjectKey) (*corev1.ConfigMap, error) { - original := &corev1.ConfigMap{} - if err := w.Client.Get(ctx, configMap, original); err != nil { - return nil, errors.Wrapf(err, "error getting %s/%s configmap from target cluster", configMap.Namespace, configMap.Name) - } - return original.DeepCopy(), nil -} +// func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.ObjectKey) (*corev1.ConfigMap, error) { +// original := &corev1.ConfigMap{} +// if err := w.Client.Get(ctx, configMap, original); err != nil { +// return nil, fmt.Errorf("error getting %s/%s configmap from target cluster: %w", configMap.Namespace, configMap.Name, err) +// } +// return original.DeepCopy(), nil +// } /** // getCoreDNSInfo returns all necessary coredns based information. @@ -133,7 +134,7 @@ func (w *Workload) getCoreDNSInfo(ctx context.Context, clusterConfig *kubeadmv1. key := ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem} cm, err := w.getConfigMap(ctx, key) if err != nil { - return nil, errors.Wrapf(err, "error getting %v config map from target cluster", key) + return nil, fmt.Errorf("error getting %v config map from target cluster: %w", key, err) } corefile, ok := cm.Data[corefileKey] if !ok { @@ -143,7 +144,7 @@ func (w *Workload) getCoreDNSInfo(ctx context.Context, clusterConfig *kubeadmv1. // Get the current CoreDNS deployment.
deployment := &appsv1.Deployment{} if err := w.Client.Get(ctx, key, deployment); err != nil { - return nil, errors.Wrapf(err, "unable to get %v deployment from target cluster", key) + return nil, fmt.Errorf("unable to get %v deployment from target cluster: %w", key, err) } var container *corev1.Container @@ -160,7 +161,7 @@ func (w *Workload) getCoreDNSInfo(ctx context.Context, clusterConfig *kubeadmv1. // Parse container image. parsedImage, err := containerutil.ImageFromString(container.Image) if err != nil { - return nil, errors.Wrapf(err, "unable to parse %q deployment image", container.Image) + return nil, fmt.Errorf("unable to parse %q deployment image: %w", container.Image, err) } // Handle imageRepository. @@ -226,12 +227,14 @@ func (w *Workload) updateCoreDNSImageInfoInKubeadmConfigMap(ctx context.Context, // updateCoreDNSCorefile migrates the coredns corefile if there is an increase // in version number. It also creates a corefile backup and patches the // deployment to point to the backup corefile before migrating. +// +//lint:ignore U1000 Ignore func (w *Workload) updateCoreDNSCorefile(ctx context.Context, info *coreDNSInfo) error { // Run the CoreDNS migration tool first because if it cannot migrate the // corefile, then there's no point in continuing further. updatedCorefile, err := w.CoreDNSMigrator.Migrate(info.CurrentMajorMinorPatch, info.TargetMajorMinorPatch, info.Corefile, false) if err != nil { - return errors.Wrap(err, "unable to migrate CoreDNS corefile") + return fmt.Errorf("unable to migrate CoreDNS corefile: %w", err) } // First we backup the Corefile by backing it up. @@ -245,7 +248,7 @@ func (w *Workload) updateCoreDNSCorefile(ctx context.Context, info *coreDNSInfo) corefileBackupKey: info.Corefile, }, }); err != nil { - return errors.Wrap(err, "unable to update CoreDNS config map with backup Corefile") + return fmt.Errorf("unable to update CoreDNS config map with backup Corefile: %w", err) } // Patching the coredns deployment to point to the Corefile-backup @@ -269,12 +272,13 @@ func (w *Workload) updateCoreDNSCorefile(ctx context.Context, info *coreDNSInfo) corefileBackupKey: info.Corefile, }, }); err != nil { - return errors.Wrap(err, "unable to update CoreDNS config map") + return fmt.Errorf("unable to update CoreDNS config map: %w", err) } return nil } +//lint:ignore U1000 Ignore func patchCoreDNSDeploymentVolume(deployment *appsv1.Deployment, fromKey, toKey string) { for _, volume := range deployment.Spec.Template.Spec.Volumes { if volume.Name == coreDNSVolumeKey && volume.ConfigMap != nil && volume.ConfigMap.Name == coreDNSKey { @@ -287,6 +291,7 @@ func patchCoreDNSDeploymentVolume(deployment *appsv1.Deployment, fromKey, toKey } } +//lint:ignore U1000 Ignore func patchCoreDNSDeploymentImage(deployment *appsv1.Deployment, image string) { containers := deployment.Spec.Template.Spec.Containers for idx, c := range containers { @@ -296,6 +301,7 @@ func patchCoreDNSDeploymentImage(deployment *appsv1.Deployment, image string) { } } +//lint:ignore U1000 Ignore func extractImageVersion(tag string) (string, error) { ver, err := version.ParseMajorMinorPatch(tag) if err != nil { @@ -307,23 +313,24 @@ // validateCoreDNSImageTag returns error if the versions don't meet requirements.
// Some of the checks come from // https://github.com/coredns/corefile-migration/blob/v1.0.6/migration/migrate.go#L414 -func validateCoreDNSImageTag(fromTag, toTag string) error { - from, err := version.ParseMajorMinorPatch(fromTag) - if err != nil { - return errors.Wrapf(err, "failed to parse CoreDNS current version %q", fromTag) - } - to, err := version.ParseMajorMinorPatch(toTag) - if err != nil { - return errors.Wrapf(err, "failed to parse CoreDNS target version %q", toTag) - } - // make sure that the version we're upgrading to is greater than the current one, - // or if they're the same version, the raw tags should be different (e.g. allow from `v1.17.4-somevendor.0` to `v1.17.4-somevendor.1`). - if x := from.Compare(to); x > 0 || (x == 0 && fromTag == toTag) { - return fmt.Errorf("toVersion %q must be greater than fromVersion %q", toTag, fromTag) - } - // check if the from version is even in the list of coredns versions - if _, ok := migration.Versions[fmt.Sprintf("%d.%d.%d", from.Major, from.Minor, from.Patch)]; !ok { - return fmt.Errorf("fromVersion %q is not a compatible coredns version", from.String()) - } - return nil -} +// func validateCoreDNSImageTag(fromTag, toTag string) error { +// from, err := version.ParseMajorMinorPatch(fromTag) +// if err != nil { +// return fmt.Errorf("failed to parse CoreDNS current version %q: %w", fromTag, err) +// } +// to, err := version.ParseMajorMinorPatch(toTag) +// if err != nil { +// return fmt.Errorf("failed to parse CoreDNS target version %q: %w", toTag, err) +// } +// // make sure that the version we're upgrading to is greater than the current one, +// // or if they're the same version, the raw tags should be different (e.g. allow from `v1.17.4-somevendor.0` to `v1.17.4-somevendor.1`). +// if x := from.Compare(to); x > 0 || (x == 0 && fromTag == toTag) { +// return fmt.Errorf("toVersion %q must be greater than fromVersion %q: %w", toTag, fromTag, ErrInvalidCoreDNSVersion) +// } + +// // check if the from version is even in the list of coredns versions +// if _, ok := migration.Versions[fmt.Sprintf("%d.%d.%d", from.Major, from.Minor, from.Patch)]; !ok { +// return fmt.Errorf("fromVersion %q is not a compatible version: %w", fromTag, ErrInvalidCoreDNSVersion) +// } +// return nil +// } diff --git a/pkg/kubeconfig/kubeconfig.go b/pkg/kubeconfig/kubeconfig.go index e67347e3..c57a70e5 100644 --- a/pkg/kubeconfig/kubeconfig.go +++ b/pkg/kubeconfig/kubeconfig.go @@ -4,9 +4,9 @@ import ( "context" "crypto" "crypto/x509" + "errors" "fmt" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,12 +23,14 @@ import ( var ( ErrDependentCertificateNotFound = errors.New("could not find secret ca") + ErrCertNotInKubeconfig = errors.New("certificate not found in config") + ErrCAPrivateKeyNotFound = errors.New("CA private key not found") ) func generateKubeconfig(ctx context.Context, c client.Client, clusterName client.ObjectKey, endpoint string) ([]byte, error) { clusterCA, err := secret.GetFromNamespacedName(ctx, c, clusterName, secret.ClusterCA) if err != nil { - if apierrors.IsNotFound(errors.Cause(err)) { + if apierrors.IsNotFound(err) { return nil, ErrDependentCertificateNotFound } return nil, err @@ -36,7 +38,7 @@ func generateKubeconfig(ctx context.Context, c client.Client, clusterName client clientClusterCA, err := secret.GetFromNamespacedName(ctx, c, clusterName, secret.ClientClusterCA) if err != nil { - if apierrors.IsNotFound(errors.Cause(err)) { + 
if apierrors.IsNotFound(err) { return nil, ErrDependentCertificateNotFound } return nil, err @@ -44,40 +46,39 @@ func generateKubeconfig(ctx context.Context, c client.Client, clusterName client clientCACert, err := certs.DecodeCertPEM(clientClusterCA.Data[secret.TLSCrtDataName]) if err != nil { - return nil, errors.Wrap(err, "failed to decode CA Cert") + return nil, fmt.Errorf("failed to decode CA Cert: %w", err) } else if clientCACert == nil { - return nil, errors.New("certificate not found in config") + return nil, ErrCertNotInKubeconfig } clientCAKey, err := certs.DecodePrivateKeyPEM(clientClusterCA.Data[secret.TLSKeyDataName]) if err != nil { - return nil, errors.Wrap(err, "failed to decode private key") + return nil, fmt.Errorf("failed to decode private key: %w", err) } else if clientCAKey == nil { - return nil, errors.New("CA private key not found") + return nil, ErrCAPrivateKeyNotFound } serverCACert, err := certs.DecodeCertPEM(clusterCA.Data[secret.TLSCrtDataName]) if err != nil { - return nil, errors.Wrap(err, "failed to decode CA Cert") + return nil, fmt.Errorf("failed to decode CA Cert: %w", err) } else if serverCACert == nil { - return nil, errors.New("certificate not found in config") + return nil, ErrCertNotInKubeconfig } cfg, err := New(clusterName.Name, endpoint, clientCACert, clientCAKey, serverCACert) if err != nil { - return nil, errors.Wrap(err, "failed to generate a kubeconfig") + return nil, fmt.Errorf("failed to generate a kubeconfig: %w", err) } out, err := clientcmd.Write(*cfg) if err != nil { - return nil, errors.Wrap(err, "failed to serialize config to yaml") + return nil, fmt.Errorf("failed to serialize config to yaml: %w", err) } return out, nil } // New creates a new Kubeconfig using the cluster name and specified endpoint. 
func New(clusterName, endpoint string, clientCACert *x509.Certificate, clientCAKey crypto.Signer, serverCACert *x509.Certificate) (*api.Config, error) { - cfg := &certs.Config{ CommonName: "kubernetes-admin", Organization: []string{"system:masters"}, @@ -86,12 +87,12 @@ func New(clusterName, endpoint string, clientCACert *x509.Certificate, clientCAK clientKey, err := certs.NewPrivateKey() if err != nil { - return nil, errors.Wrap(err, "unable to create private key") + return nil, fmt.Errorf("unable to create private key: %w", err) } clientCert, err := cfg.NewSignedCert(clientKey, clientCACert, clientCAKey) if err != nil { - return nil, errors.Wrap(err, "unable to sign certificate") + return nil, fmt.Errorf("unable to sign certificate: %w", err) } userName := fmt.Sprintf("%s-admin", clusterName) diff --git a/pkg/locking/control_plane_init_mutex.go b/pkg/locking/control_plane_init_mutex.go index 0a3027f7..06e66bec 100644 --- a/pkg/locking/control_plane_init_mutex.go +++ b/pkg/locking/control_plane_init_mutex.go @@ -22,8 +22,7 @@ import ( "fmt" "github.com/go-logr/logr" - "github.com/pkg/errors" - apicorev1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -132,11 +131,11 @@ type information struct { } type semaphore struct { - *apicorev1.ConfigMap + *corev1.ConfigMap } func newSemaphore() *semaphore { - return &semaphore{&apicorev1.ConfigMap{}} + return &semaphore{&corev1.ConfigMap{}} } func configMapName(clusterName string) string { @@ -146,7 +145,7 @@ func configMapName(clusterName string) string { func (s semaphore) information() (*information, error) { li := &information{} if err := json.Unmarshal([]byte(s.Data[semaphoreInformationKey]), li); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal semaphore information") + return nil, fmt.Errorf("failed to unmarshal semaphore information: %w", err) } return li, nil } @@ -154,7 +153,7 @@ func (s semaphore) information() (*information, error) { func (s semaphore) setInformation(information *information) error { b, err := json.Marshal(information) if err != nil { - return errors.Wrap(err, "failed to marshal semaphore information") + return fmt.Errorf("failed to marshal semaphore information: %w", err) } s.Data = map[string]string{} s.Data[semaphoreInformationKey] = string(b) diff --git a/pkg/machinefilters/machine_filters.go b/pkg/machinefilters/machine_filters.go index a3676235..a05d43a7 100644 --- a/pkg/machinefilters/machine_filters.go +++ b/pkg/machinefilters/machine_filters.go @@ -23,12 +23,12 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" controlplanev1 "github.com/cluster-api-provider-k3s/cluster-api-k3s/controlplane/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) type Func func(machine *clusterv1.Machine) bool @@ -100,7 +100,7 @@ func InFailureDomains(failureDomains ...*string) Func { // OwnedMachines returns a filter to find all owned control plane machines. 
// Usage: managementCluster.GetMachinesForCluster(ctx, cluster, machinefilters.OwnedMachines(controlPlane)) -func OwnedMachines(owner controllerutil.Object) func(machine *clusterv1.Machine) bool { +func OwnedMachines(owner client.Object) func(machine *clusterv1.Machine) bool { return func(machine *clusterv1.Machine) bool { if machine == nil { return false @@ -184,13 +184,14 @@ func HasAnnotationKey(key string) Func { } // ControlPlaneSelectorForCluster returns the label selector necessary to get control plane machines for a given cluster. -func ControlPlaneSelectorForCluster(clusterName string) labels.Selector { +func ControlPlaneSelectorForCluster(clusterName string) labels.Selector { //nolint:ireturn must := func(r *labels.Requirement, err error) labels.Requirement { if err != nil { panic(err) } return *r } + return labels.NewSelector().Add( must(labels.NewRequirement(clusterv1.ClusterLabelName, selection.Equals, []string{clusterName})), must(labels.NewRequirement(clusterv1.MachineControlPlaneLabelName, selection.Exists, []string{})), @@ -210,7 +211,6 @@ func MatchesKCPConfiguration(infraConfigs map[string]*unstructured.Unstructured, // MatchesTemplateClonedFrom returns a filter to find all machines that match a given KCP infra template. func MatchesTemplateClonedFrom(infraConfigs map[string]*unstructured.Unstructured, kcp *controlplanev1.KThreesControlPlane) Func { return func(machine *clusterv1.Machine) bool { - if machine == nil { return false } @@ -257,18 +257,3 @@ func MatchesKThreesBootstrapConfig(machineConfigs map[string]*bootstrapv1.KThree return true } } - -// getAdjustedKcpConfig takes the KThreesConfigSpec from KCP and applies the transformations required -// to allow a comparison with the KThreesConfig referenced from the machine. -// NOTE: The KCP controller applies a set of transformations when creating a KThreesConfig referenced from the machine, -// mostly depending on the fact that the machine was the initial control plane node or a joining control plane node. -// In this function we don't have such information, so we are making the KThreesConfigSpec similar to the KThreesConfig. -func getAdjustedKcpConfig(kcp *controlplanev1.KThreesControlPlane, machineConfig *bootstrapv1.KThreesConfig) *bootstrapv1.KThreesConfigSpec { - - return &kcp.Spec.KThreesConfigSpec -} - -// cleanupConfigFields cleanups all the fields that are not relevant for the comparison. 
-func cleanupConfigFields(kcpConfig *bootstrapv1.KThreesConfigSpec, machineConfig *bootstrapv1.KThreesConfig) { - -} diff --git a/pkg/secret/certificates.go b/pkg/secret/certificates.go index 88e8d1f2..156e4bd7 100644 --- a/pkg/secret/certificates.go +++ b/pkg/secret/certificates.go @@ -24,6 +24,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/hex" + "fmt" "math/big" "path/filepath" "strings" @@ -126,11 +127,11 @@ func (c Certificates) EnsureAllExist() error { return ErrMissingCertificate } if len(certificate.KeyPair.Cert) == 0 { - return errors.Wrapf(ErrMissingCrt, "for certificate: %s", certificate.Purpose) + return fmt.Errorf("for certificate %s: %w", certificate.Purpose, ErrMissingCrt) } if !certificate.External { if len(certificate.KeyPair.Key) == 0 { - return errors.Wrapf(ErrMissingKey, "for certificate: %s", certificate.Purpose) + return fmt.Errorf("for certificate %s: %w", certificate.Purpose, ErrMissingKey) } } } @@ -197,7 +198,7 @@ type Certificate struct { func (c *Certificate) Hashes() ([]string, error) { certificates, err := cert.ParseCertsPEM(c.KeyPair.Cert) if err != nil { - return nil, errors.Wrapf(err, "unable to parse %s certificate", c.Purpose) + return nil, fmt.Errorf("unable to parse %s certificate: %w", c.Purpose, err) } out := make([]string, 0) for _, c := range certificates { @@ -390,7 +391,7 @@ func newSelfSignedCACert(key *rsa.PrivateKey) (*x509.Certificate, error) { b, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key) if err != nil { - return nil, errors.Wrapf(err, "failed to create self signed CA certificate: %+v", tmpl) + return nil, fmt.Errorf("failed to create self signed CA certificate %+v: %w", tmpl, err) } c, err := x509.ParseCertificate(b) diff --git a/pkg/token/token.go b/pkg/token/token.go index 70f9a896..67be9985 100644 --- a/pkg/token/token.go +++ b/pkg/token/token.go @@ -7,7 +7,7 @@ import ( ) func Random(size int) (string, error) { - token := make([]byte, size, size) + token := make([]byte, size) _, err := cryptorand.Read(token) if err != nil { return "", err