diff --git a/exp/runtime/internal/controllers/extensionconfig_controller.go b/exp/runtime/internal/controllers/extensionconfig_controller.go
index 5162d9ede30b..37a85e2cd40b 100644
--- a/exp/runtime/internal/controllers/extensionconfig_controller.go
+++ b/exp/runtime/internal/controllers/extensionconfig_controller.go
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -37,7 +38,6 @@ import (
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -185,7 +185,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, extensionConfig *runti
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("Unregistering ExtensionConfig information from registry")
 	if err := r.RuntimeClient.Unregister(extensionConfig); err != nil {
-		return ctrl.Result{}, errors.Wrapf(err, "failed to unregister %s", tlog.KObj{Obj: extensionConfig})
+		return ctrl.Result{}, errors.Wrapf(err, "failed to unregister ExtensionConfig %s", klog.KObj(extensionConfig))
 	}
 	return ctrl.Result{}, nil
 }
@@ -221,7 +221,7 @@ func discoverExtensionConfig(ctx context.Context, runtimeClient runtimeclient.Cl
 	if err != nil {
 		modifiedExtensionConfig := extensionConfig.DeepCopy()
 		conditions.MarkFalse(modifiedExtensionConfig, runtimev1.RuntimeExtensionDiscoveredCondition, runtimev1.DiscoveryFailedReason, clusterv1.ConditionSeverityError, "error in discovery: %v", err)
-		return modifiedExtensionConfig, errors.Wrapf(err, "failed to discover %s", tlog.KObj{Obj: extensionConfig})
+		return modifiedExtensionConfig, errors.Wrapf(err, "failed to discover ExtensionConfig %s", klog.KObj(extensionConfig))
 	}
 
 	conditions.MarkTrue(discoveredExtension, runtimev1.RuntimeExtensionDiscoveredCondition)
diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go
index d3925ef355b2..c80e5c5a3091 100644
--- a/exp/topology/desiredstate/desired_state.go
+++ b/exp/topology/desiredstate/desired_state.go
@@ -26,7 +26,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -40,7 +42,6 @@ import (
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
 	"sigs.k8s.io/cluster-api/internal/hooks"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 	"sigs.k8s.io/cluster-api/internal/topology/clustershim"
 	topologynames "sigs.k8s.io/cluster-api/internal/topology/names"
@@ -399,7 +400,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
 // The version is calculated using the state of the current machine deployments, the current control plane
 // and the version defined in the topology.
 func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
-	log := tlog.LoggerFrom(ctx)
+	log := ctrl.LoggerFrom(ctx)
 	desiredVersion := s.Blueprint.Topology.Version
 	// If we are creating the control plane object (current control plane is nil), use version from topology.
 	if s.Current.ControlPlane == nil || s.Current.ControlPlane.Object == nil {
@@ -477,7 +478,7 @@ func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 			// change the UpgradeTracker accordingly, otherwise the hook call is completed and we
 			// can remove this hook from the list of pending-hooks.
 			if hookResponse.RetryAfterSeconds != 0 {
-				log.Infof("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
+				log.Info(fmt.Sprintf("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade)))
 			} else {
 				if err := hooks.MarkAsDone(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
 					return "", err
 				}
@@ -527,7 +528,7 @@ func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 		s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterUpgrade, hookResponse)
 		if hookResponse.RetryAfterSeconds != 0 {
 			// Cannot pickup the new version right now. Need to try again later.
-			log.Infof("Cluster upgrade to version %q is blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.BeforeClusterUpgrade))
+			log.Info(fmt.Sprintf("Cluster upgrade to version %q is blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.BeforeClusterUpgrade)))
 			return *currentVersion, nil
 		}
 
@@ -627,7 +628,7 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
 	className := machineDeploymentTopology.Class
 	machineDeploymentBlueprint, ok := s.Blueprint.MachineDeployments[className]
 	if !ok {
-		return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
+		return nil, errors.Errorf("MachineDeployment class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
 	}
 
 	var machineDeploymentClass *clusterv1.MachineDeploymentClass
@@ -638,7 +639,7 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
 		}
 	}
 	if machineDeploymentClass == nil {
-		return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
+		return nil, errors.Errorf("MachineDeployment class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
 	}
 
 	// Compute the bootstrap template.
@@ -952,7 +953,7 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
 	className := machinePoolTopology.Class
 	machinePoolBlueprint, ok := s.Blueprint.MachinePools[className]
 	if !ok {
-		return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
+		return nil, errors.Errorf("MachinePool class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
 	}
 
 	var machinePoolClass *clusterv1.MachinePoolClass
@@ -963,7 +964,7 @@ func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
 		}
 	}
 	if machinePoolClass == nil {
-		return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
+		return nil, errors.Errorf("MachinePool class %s not found in ClusterClass %s", className, klog.KObj(s.Blueprint.ClusterClass))
 	}
 
 	// Compute the bootstrap config.
diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go
index 9fc0c5a0ef11..7d908d1524fd 100644
--- a/internal/controllers/clusterclass/clusterclass_controller.go
+++ b/internal/controllers/clusterclass/clusterclass_controller.go
@@ -32,6 +32,7 @@ import (
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -44,7 +45,6 @@ import (
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/feature"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 	"sigs.k8s.io/cluster-api/internal/topology/variables"
 	"sigs.k8s.io/cluster-api/util/annotations"
@@ -391,7 +391,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, clusterClass *cluste
 
 	// Set external object ControllerReference to the ClusterClass.
 	if err := controllerutil.SetOwnerReference(clusterClass, obj, r.Client.Scheme()); err != nil {
-		return errors.Wrapf(err, "failed to set ClusterClass owner reference for %s", tlog.KObj{Obj: obj})
+		return errors.Wrapf(err, "failed to set ClusterClass owner reference for %s %s", obj.GetKind(), klog.KObj(obj))
 	}
 
 	// Patch the external object.
diff --git a/internal/controllers/clusterclass/clusterclass_controller_test.go b/internal/controllers/clusterclass/clusterclass_controller_test.go
index 18930c7f3fc9..0bdf6ebb16de 100644
--- a/internal/controllers/clusterclass/clusterclass_controller_test.go
+++ b/internal/controllers/clusterclass/clusterclass_controller_test.go
@@ -32,6 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/version"
 	utilversion "k8s.io/apiserver/pkg/util/version"
 	utilfeature "k8s.io/component-base/featuregate/testing"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -42,7 +43,6 @@ import (
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/feature"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
 	"sigs.k8s.io/cluster-api/internal/test/builder"
 )
@@ -384,7 +384,7 @@ func assertHasOwnerReference(obj client.Object, ownerRef metav1.OwnerReference)
 		}
 	}
 	if !found {
-		return fmt.Errorf("object %s does not have OwnerReference %s", tlog.KObj{Obj: obj}, &ownerRef)
+		return fmt.Errorf("%s does not have OwnerReference %s", klog.KObj(obj), &ownerRef)
 	}
 	return nil
 }
diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go
index c618668573d8..dfa8ddbbddbf 100644
--- a/internal/controllers/topology/cluster/blueprint.go
+++ b/internal/controllers/topology/cluster/blueprint.go
@@ -20,10 +20,10 @@ import (
 	"context"
 
 	"github.com/pkg/errors"
+	"k8s.io/klog/v2"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/exp/topology/scope"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 )
 
 // getBlueprint gets a ClusterBlueprint with the ClusterClass and the referenced templates to be used for a managed Cluster topology.
@@ -41,21 +41,21 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
 	// Get ClusterClass.spec.infrastructure.
 	blueprint.InfrastructureClusterTemplate, err = r.getReference(ctx, blueprint.ClusterClass.Spec.Infrastructure.Ref)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get infrastructure cluster template for %s", tlog.KObj{Obj: blueprint.ClusterClass})
+		return nil, errors.Wrapf(err, "failed to get infrastructure cluster template for ClusterClass %s", klog.KObj(blueprint.ClusterClass))
 	}
 
 	// Get ClusterClass.spec.controlPlane.
 	blueprint.ControlPlane = &scope.ControlPlaneBlueprint{}
 	blueprint.ControlPlane.Template, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.Ref)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get control plane template for %s", tlog.KObj{Obj: blueprint.ClusterClass})
+		return nil, errors.Wrapf(err, "failed to get control plane template for ClusterClass %s", klog.KObj(blueprint.ClusterClass))
 	}
 
 	// If the clusterClass mandates the controlPlane has infrastructureMachines, read it.
 	if blueprint.HasControlPlaneInfrastructureMachine() {
 		blueprint.ControlPlane.InfrastructureMachineTemplate, err = r.getReference(ctx, blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get control plane's machine template for %s", tlog.KObj{Obj: blueprint.ClusterClass})
+			return nil, errors.Wrapf(err, "failed to get control plane's machine template for ClusterClass %s", klog.KObj(blueprint.ClusterClass))
 		}
 	}
 
@@ -77,13 +77,13 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
 		// Get the infrastructure machine template.
 		machineDeploymentBlueprint.InfrastructureMachineTemplate, err = r.getReference(ctx, machineDeploymentClass.Template.Infrastructure.Ref)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get infrastructure machine template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
+			return nil, errors.Wrapf(err, "failed to get infrastructure machine template for ClusterClass %s, MachineDeployment class %q", klog.KObj(blueprint.ClusterClass), machineDeploymentClass.Class)
 		}
 
 		// Get the bootstrap config template.
 		machineDeploymentBlueprint.BootstrapTemplate, err = r.getReference(ctx, machineDeploymentClass.Template.Bootstrap.Ref)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get bootstrap config template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
+			return nil, errors.Wrapf(err, "failed to get bootstrap config template for ClusterClass %s, MachineDeployment class %q", klog.KObj(blueprint.ClusterClass), machineDeploymentClass.Class)
 		}
 
 		// If the machineDeploymentClass defines a MachineHealthCheck add it to the blueprint.
@@ -106,13 +106,13 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
 		// Get the InfrastructureMachinePoolTemplate.
 		machinePoolBlueprint.InfrastructureMachinePoolTemplate, err = r.getReference(ctx, machinePoolClass.Template.Infrastructure.Ref)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get InfrastructureMachinePoolTemplate for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
+			return nil, errors.Wrapf(err, "failed to get InfrastructureMachinePoolTemplate for ClusterClass %s, MachinePool class %q", klog.KObj(blueprint.ClusterClass), machinePoolClass.Class)
 		}
 
 		// Get the bootstrap config template.
 		machinePoolBlueprint.BootstrapTemplate, err = r.getReference(ctx, machinePoolClass.Template.Bootstrap.Ref)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get bootstrap config for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
+			return nil, errors.Wrapf(err, "failed to get bootstrap config for ClusterClass %s, MachinePool class %q", klog.KObj(blueprint.ClusterClass), machinePoolClass.Class)
 		}
 
 		blueprint.MachinePools[machinePoolClass.Class] = machinePoolBlueprint
diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go
index ae03209bd875..2aadc123bcaf 100644
--- a/internal/controllers/topology/cluster/cluster_controller.go
+++ b/internal/controllers/topology/cluster/cluster_controller.go
@@ -45,7 +45,6 @@ import (
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
 	"sigs.k8s.io/cluster-api/internal/hooks"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 	"sigs.k8s.io/cluster-api/internal/util/ssa"
 	"sigs.k8s.io/cluster-api/internal/webhooks"
@@ -316,7 +315,7 @@ func (r *Reconciler) setupDynamicWatches(ctx context.Context, s *scope.Scope) er
 func (r *Reconciler) callBeforeClusterCreateHook(ctx context.Context, s *scope.Scope) (reconcile.Result, error) {
 	// If the cluster objects (InfraCluster, ControlPlane, etc) are not yet created we are in the creation phase.
 	// Call the BeforeClusterCreate hook before proceeding.
-	log := tlog.LoggerFrom(ctx)
+	log := ctrl.LoggerFrom(ctx)
 	if s.Current.Cluster.Spec.InfrastructureRef == nil && s.Current.Cluster.Spec.ControlPlaneRef == nil {
 		hookRequest := &runtimehooksv1.BeforeClusterCreateRequest{
 			Cluster: *s.Current.Cluster,
@@ -327,7 +326,7 @@ func (r *Reconciler) callBeforeClusterCreateHook(ctx context.Context, s *scope.S
 		}
 		s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterCreate, hookResponse)
 		if hookResponse.RetryAfterSeconds != 0 {
-			log.Infof("Creation of Cluster topology is blocked by %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterCreate))
+			log.Info(fmt.Sprintf("Creation of Cluster topology is blocked by %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterCreate)))
 			return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
 		}
 	}
@@ -402,7 +401,7 @@ func (r *Reconciler) machinePoolToCluster(_ context.Context, o client.Object) []
 func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
 	// Call the BeforeClusterDelete hook if the 'ok-to-delete' annotation is not set
 	// and add the annotation to the cluster after receiving a successful non-blocking response.
-	log := tlog.LoggerFrom(ctx)
+	log := ctrl.LoggerFrom(ctx)
 	if feature.Gates.Enabled(feature.RuntimeSDK) {
 		if !hooks.IsOkToDelete(cluster) {
 			hookRequest := &runtimehooksv1.BeforeClusterDeleteRequest{
@@ -413,7 +412,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 				return ctrl.Result{}, err
 			}
 			if hookResponse.RetryAfterSeconds != 0 {
-				log.Infof("Cluster deletion is blocked by %q hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterDelete))
+				log.Info(fmt.Sprintf("Cluster deletion is blocked by %q hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterDelete)))
 				return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
 			}
 			// The BeforeClusterDelete hook returned a non-blocking response. Now the cluster is ready to be deleted.
diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go
index fb359b9af917..48859cccae6c 100644
--- a/internal/controllers/topology/cluster/current_state.go
+++ b/internal/controllers/topology/cluster/current_state.go
@@ -25,13 +25,13 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/exp/topology/scope"
 	"sigs.k8s.io/cluster-api/internal/contract"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/util/labels"
 )
 
@@ -86,17 +86,17 @@ func (r *Reconciler) getCurrentState(ctx context.Context, s *scope.Scope) (*scop
 func (r *Reconciler) getCurrentInfrastructureClusterState(ctx context.Context, blueprintInfrastructureClusterTemplate *unstructured.Unstructured, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) {
 	ref, err := alignRefAPIVersion(blueprintInfrastructureClusterTemplate, cluster.Spec.InfrastructureRef)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.InfrastructureRef})
+		return nil, errors.Wrapf(err, "failed to read %s %s", cluster.Spec.InfrastructureRef.Kind, klog.KRef(cluster.Spec.InfrastructureRef.Namespace, cluster.Spec.InfrastructureRef.Name))
 	}
 	infra, err := r.getReference(ctx, ref)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.InfrastructureRef})
+		return nil, errors.Wrapf(err, "failed to read %s %s", cluster.Spec.InfrastructureRef.Kind, klog.KRef(cluster.Spec.InfrastructureRef.Namespace, cluster.Spec.InfrastructureRef.Name))
 	}
 	// check that the referenced object has the ClusterTopologyOwnedLabel label.
 	// Nb. This is to make sure that a managed topology cluster does not have a reference to an object that is not
 	// owned by the topology.
 	if !labels.IsTopologyOwned(infra) {
-		return nil, fmt.Errorf("infra cluster object %s referenced from cluster %s is not topology owned", tlog.KObj{Obj: infra}, tlog.KObj{Obj: cluster})
+		return nil, fmt.Errorf("%s %s referenced from Cluster %s is not topology owned", infra.GetKind(), klog.KObj(infra), klog.KObj(cluster))
 	}
 	return infra, nil
 }
@@ -111,17 +111,17 @@ func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, blueprintC
 	// Get the control plane object.
 	ref, err := alignRefAPIVersion(blueprintControlPlane.Template, cluster.Spec.ControlPlaneRef)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.ControlPlaneRef})
+		return nil, errors.Wrapf(err, "failed to read %s %s", cluster.Spec.ControlPlaneRef.Kind, klog.KRef(cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name))
 	}
 	res.Object, err = r.getReference(ctx, ref)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to read %s", tlog.KRef{Ref: cluster.Spec.ControlPlaneRef})
+		return nil, errors.Wrapf(err, "failed to read %s %s", cluster.Spec.ControlPlaneRef.Kind, klog.KRef(cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name))
 	}
 	// check that the referenced object has the ClusterTopologyOwnedLabel label.
 	// Nb. This is to make sure that a managed topology cluster does not have a reference to an object that is not
 	// owned by the topology.
 	if !labels.IsTopologyOwned(res.Object) {
-		return nil, fmt.Errorf("control plane object %s referenced from cluster %s is not topology owned", tlog.KObj{Obj: res.Object}, tlog.KObj{Obj: cluster})
+		return nil, fmt.Errorf("%s %s referenced from Cluster %s is not topology owned", res.Object.GetKind(), klog.KObj(res.Object), klog.KObj(cluster))
 	}
 
 	// If the clusterClass does not mandate the controlPlane has infrastructureMachines, return.
@@ -132,21 +132,21 @@ func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, blueprintC
 	// Otherwise, get the control plane machine infrastructureMachine template.
 	machineInfrastructureRef, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(res.Object)
 	if err != nil {
-		return res, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate reference for %s", tlog.KObj{Obj: res.Object})
+		return res, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate reference for %s %s", res.Object.GetKind(), klog.KObj(res.Object))
 	}
 	ref, err = alignRefAPIVersion(blueprintControlPlane.InfrastructureMachineTemplate, machineInfrastructureRef)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate for %s", tlog.KObj{Obj: res.Object})
+		return nil, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate for %s %s", res.Object.GetKind(), klog.KObj(res.Object))
 	}
 	res.InfrastructureMachineTemplate, err = r.getReference(ctx, ref)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate for %s", tlog.KObj{Obj: res.Object})
+		return nil, errors.Wrapf(err, "failed to get InfrastructureMachineTemplate for %s %s", res.Object.GetKind(), klog.KObj(res.Object))
 	}
 	// check that the referenced object has the ClusterTopologyOwnedLabel label.
 	// Nb. This is to make sure that a managed topology cluster does not have a reference to an object that is not
 	// owned by the topology.
 	if !labels.IsTopologyOwned(res.InfrastructureMachineTemplate) {
-		return nil, fmt.Errorf("control plane InfrastructureMachineTemplate object %s referenced from cluster %s is not topology owned", tlog.KObj{Obj: res.InfrastructureMachineTemplate}, tlog.KObj{Obj: cluster})
+		return nil, fmt.Errorf("%s %s referenced from %s %s is not topology owned", res.InfrastructureMachineTemplate.GetKind(), klog.KObj(res.InfrastructureMachineTemplate), res.Object.GetKind(), klog.KObj(res.Object))
 	}
 
 	mhc := &clusterv1.MachineHealthCheck{}
@@ -156,7 +156,7 @@ func (r *Reconciler) getCurrentControlPlaneState(ctx context.Context, blueprintC
 		if apierrors.IsNotFound(err) {
 			return res, nil
 		}
-		return nil, errors.Wrapf(err, "failed to get MachineHealthCheck for %s", tlog.KObj{Obj: res.Object})
+		return nil, errors.Wrapf(err, "failed to get MachineHealthCheck for %s %s", res.Object.GetKind(), klog.KObj(res.Object))
 	}
 	res.MachineHealthCheck = mhc
 	return res, nil
@@ -194,25 +194,25 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, bluep
 		// from a well-defined label.
 		mdTopologyName, ok := m.ObjectMeta.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel]
 		if !ok || mdTopologyName == "" {
-			return nil, fmt.Errorf("failed to find label %s in %s", clusterv1.ClusterTopologyMachineDeploymentNameLabel, tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("failed to find label %s in MachineDeployment %s", clusterv1.ClusterTopologyMachineDeploymentNameLabel, klog.KObj(m))
 		}
 
 		// Make sure that the name of the MachineDeployment stays unique.
 		// If we've already seen a MachineDeployment with the same name
 		// this is an error, probably caused from manual modifications or a race condition.
 		if _, ok := state[mdTopologyName]; ok {
-			return nil, fmt.Errorf("duplicate %s found for label %s: %s", tlog.KObj{Obj: m}, clusterv1.ClusterTopologyMachineDeploymentNameLabel, mdTopologyName)
+			return nil, fmt.Errorf("duplicate MachineDeployment %s found for label %s: %s", klog.KObj(m), clusterv1.ClusterTopologyMachineDeploymentNameLabel, mdTopologyName)
 		}
 
 		// Gets the bootstrapRef.
 		bootstrapRef := m.Spec.Template.Spec.Bootstrap.ConfigRef
 		if bootstrapRef == nil {
-			return nil, fmt.Errorf("%s does not have a reference to a Bootstrap Config", tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("MachineDeployment %s does not have a reference to a Bootstrap Config", klog.KObj(m))
 		}
 		// Gets the infraRef.
 		infraRef := &m.Spec.Template.Spec.InfrastructureRef
 		if infraRef.Name == "" {
-			return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachineTemplate", tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("MachineDeployment %s does not have a reference to a InfrastructureMachineTemplate", klog.KObj(m))
 		}
 
 		// If the mdTopology exists in the Cluster, lookup the corresponding mdBluePrint and align
@@ -226,36 +226,36 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, bluep
 			}
 			bootstrapRef, err = alignRefAPIVersion(mdBluePrint.BootstrapTemplate, bootstrapRef)
 			if err != nil {
-				return nil, errors.Wrap(err, fmt.Sprintf("%s Bootstrap reference could not be retrieved", tlog.KObj{Obj: m}))
+				return nil, errors.Wrap(err, fmt.Sprintf("MachineDeployment %s Bootstrap reference could not be retrieved", klog.KObj(m)))
 			}
 			infraRef, err = alignRefAPIVersion(mdBluePrint.InfrastructureMachineTemplate, infraRef)
 			if err != nil {
-				return nil, errors.Wrap(err, fmt.Sprintf("%s Infrastructure reference could not be retrieved", tlog.KObj{Obj: m}))
+				return nil, errors.Wrap(err, fmt.Sprintf("MachineDeployment %s Infrastructure reference could not be retrieved", klog.KObj(m)))
 			}
 		}
 
 		// Get the BootstrapTemplate.
 		bootstrapTemplate, err := r.getReference(ctx, bootstrapRef)
 		if err != nil {
-			return nil, errors.Wrap(err, fmt.Sprintf("%s Bootstrap reference could not be retrieved", tlog.KObj{Obj: m}))
+			return nil, errors.Wrap(err, fmt.Sprintf("MachineDeployment %s Bootstrap reference could not be retrieved", klog.KObj(m)))
 		}
 		// check that the referenced object has the ClusterTopologyOwnedLabel label.
 		// Nb. This is to make sure that a managed topology cluster does not have a reference to an object that is not
 		// owned by the topology.
 		if !labels.IsTopologyOwned(bootstrapTemplate) {
-			return nil, fmt.Errorf("BootstrapTemplate object %s referenced from MD %s is not topology owned", tlog.KObj{Obj: bootstrapTemplate}, tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("%s %s referenced from MachineDeployment %s is not topology owned", bootstrapTemplate.GetKind(), klog.KObj(bootstrapTemplate), klog.KObj(m))
 		}
 
 		// Get the InfraMachineTemplate.
 		infraMachineTemplate, err := r.getReference(ctx, infraRef)
 		if err != nil {
-			return nil, errors.Wrap(err, fmt.Sprintf("%s Infrastructure reference could not be retrieved", tlog.KObj{Obj: m}))
+			return nil, errors.Wrap(err, fmt.Sprintf("MachineDeployment %s Infrastructure reference could not be retrieved", klog.KObj(m)))
 		}
 		// check that the referenced object has the ClusterTopologyOwnedLabel label.
 		// Nb. This is to make sure that a managed topology cluster does not have a reference to an object that is not
 		// owned by the topology.
 		if !labels.IsTopologyOwned(infraMachineTemplate) {
-			return nil, fmt.Errorf("InfrastructureMachineTemplate object %s referenced from MD %s is not topology owned", tlog.KObj{Obj: infraMachineTemplate}, tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("%s %s referenced from MachineDeployment %s is not topology owned", infraMachineTemplate.GetKind(), klog.KObj(infraMachineTemplate), klog.KObj(m))
 		}
 
 		// Gets the MachineHealthCheck.
@@ -267,7 +267,7 @@ func (r *Reconciler) getCurrentMachineDeploymentState(ctx context.Context, bluep
 			// Each MachineDeployment isn't required to have a MachineHealthCheck. Ignore the error if it's of the type not found, but return any other error.
 			if !apierrors.IsNotFound(err) {
-				return nil, errors.Wrap(err, fmt.Sprintf("failed to get MachineHealthCheck for %s", tlog.KObj{Obj: m}))
+				return nil, errors.Wrap(err, fmt.Sprintf("failed to get MachineHealthCheck for MachineDeployment %s", klog.KObj(m)))
 			}
 		}
 
@@ -313,25 +313,25 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa
 		// from a well-defined label.
 		mpTopologyName, ok := m.ObjectMeta.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
 		if !ok || mpTopologyName == "" {
-			return nil, fmt.Errorf("failed to find label %s in %s", clusterv1.ClusterTopologyMachinePoolNameLabel, tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("failed to find label %s in MachinePool %s", clusterv1.ClusterTopologyMachinePoolNameLabel, klog.KObj(m))
 		}
 
 		// Make sure that the name of the MachinePool stays unique.
 		// If we've already seen a MachinePool with the same name
 		// this is an error, probably caused from manual modifications or a race condition.
 		if _, ok := state[mpTopologyName]; ok {
-			return nil, fmt.Errorf("duplicate %s found for label %s: %s", tlog.KObj{Obj: m}, clusterv1.ClusterTopologyMachinePoolNameLabel, mpTopologyName)
+			return nil, fmt.Errorf("duplicate MachinePool %s found for label %s: %s", klog.KObj(m), clusterv1.ClusterTopologyMachinePoolNameLabel, mpTopologyName)
 		}
 
 		// Gets the bootstrapRef.
 		bootstrapRef := m.Spec.Template.Spec.Bootstrap.ConfigRef
 		if bootstrapRef == nil {
-			return nil, fmt.Errorf("%s does not have a reference to a Bootstrap Config", tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("MachinePool %s does not have a reference to a Bootstrap Config", klog.KObj(m))
 		}
 		// Gets the infraRef.
 		infraRef := &m.Spec.Template.Spec.InfrastructureRef
 		if infraRef.Name == "" {
-			return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachinePool", tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("MachinePool %s does not have a reference to a InfrastructureMachinePool", klog.KObj(m))
 		}
 
 		// If the mpTopology exists in the Cluster, lookup the corresponding mpBluePrint and align
@@ -345,36 +345,36 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa
 			}
 			bootstrapRef, err = alignRefAPIVersion(mpBluePrint.BootstrapTemplate, bootstrapRef)
 			if err != nil {
-				return nil, errors.Wrap(err, fmt.Sprintf("%s Bootstrap reference could not be retrieved", tlog.KObj{Obj: m}))
+				return nil, errors.Wrap(err, fmt.Sprintf("MachinePool %s Bootstrap reference could not be retrieved", klog.KObj(m)))
 			}
 			infraRef, err = alignRefAPIVersion(mpBluePrint.InfrastructureMachinePoolTemplate, infraRef)
 			if err != nil {
-				return nil, errors.Wrap(err, fmt.Sprintf("%s Infrastructure reference could not be retrieved", tlog.KObj{Obj: m}))
+				return nil, errors.Wrap(err, fmt.Sprintf("MachinePool %s Infrastructure reference could not be retrieved", klog.KObj(m)))
 			}
 		}
 
 		// Get the BootstrapObject
 		bootstrapObject, err := r.getReference(ctx, bootstrapRef)
 		if err != nil {
-			return nil, errors.Wrap(err, fmt.Sprintf("%s Bootstrap reference could not be retrieved", tlog.KObj{Obj: m}))
+			return nil, errors.Wrap(err, fmt.Sprintf("MachinePool %s Bootstrap reference could not be retrieved", klog.KObj(m)))
 		}
 		// check that the referenced object has the ClusterTopologyOwnedLabel label.
 		// Nb. This is to make sure that a managed topology cluster does not have a reference to an object that is not
 		// owned by the topology.
 		if !labels.IsTopologyOwned(bootstrapObject) {
-			return nil, fmt.Errorf("bootstrap object %s referenced from MP %s is not topology owned", tlog.KObj{Obj: bootstrapObject}, tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("%s %s referenced from MachinePool %s is not topology owned", bootstrapObject.GetKind(), klog.KObj(bootstrapObject), klog.KObj(m))
 		}
 
 		// Get the InfraMachinePoolObject.
 		infraMachinePoolObject, err := r.getReference(ctx, infraRef)
 		if err != nil {
-			return nil, errors.Wrap(err, fmt.Sprintf("%s Infrastructure reference could not be retrieved", tlog.KObj{Obj: m}))
+			return nil, errors.Wrap(err, fmt.Sprintf("MachinePool %s Infrastructure reference could not be retrieved", klog.KObj(m)))
		}
 		// check that the referenced object has the ClusterTopologyOwnedLabel label.
 		// Nb. This is to make sure that a managed topology cluster does not have a reference to an object that is not
 		// owned by the topology.
 		if !labels.IsTopologyOwned(infraMachinePoolObject) {
-			return nil, fmt.Errorf("InfrastructureMachinePool object %s referenced from MP %s is not topology owned", tlog.KObj{Obj: infraMachinePoolObject}, tlog.KObj{Obj: m})
+			return nil, fmt.Errorf("%s %s referenced from MachinePool %s is not topology owned", infraMachinePoolObject.GetKind(), klog.KObj(infraMachinePoolObject), klog.KObj(m))
 		}
 
 		state[mpTopologyName] = &scope.MachinePoolState{
diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go
index 1195838bcd21..e21b89c4931c 100644
--- a/internal/controllers/topology/cluster/patches/engine.go
+++ b/internal/controllers/topology/cluster/patches/engine.go
@@ -27,6 +27,7 @@ import (
 	"github.com/pkg/errors"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
@@ -38,7 +39,6 @@ import (
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/external"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/inline"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 )
 
@@ -71,7 +71,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 		return nil
 	}
 
-	log := tlog.LoggerFrom(ctx)
+	log := ctrl.LoggerFrom(ctx)
 
 	// Create a patch generation request.
 	req, err := createRequest(blueprint, desired)
@@ -83,7 +83,8 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 	// respecting the order in which they are defined.
 	for i := range blueprint.ClusterClass.Spec.Patches {
 		clusterClassPatch := blueprint.ClusterClass.Spec.Patches[i]
-		ctx, log := log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
+		log := log.WithValues("patch", clusterClassPatch.Name)
+		ctx := ctrl.LoggerInto(ctx, log)
 
 		definitionFrom := clusterClassPatch.Name
 		// If this isn't an external patch, use the inline patch name.
@@ -93,7 +94,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 		if err := addVariablesForPatch(blueprint, desired, req, definitionFrom); err != nil {
 			return errors.Wrapf(err, "failed to calculate variables for patch %q", clusterClassPatch.Name)
 		}
-		log.V(5).Infof("Applying patch to templates")
+		log.V(5).Info("Applying patch to templates")
 
 		// Create patch generator for the current patch.
 		generator, err := createPatchGenerator(e.runtimeClient, &clusterClassPatch)
@@ -128,9 +129,10 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 			continue
 		}
 
-		ctx, log := log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
+		log := log.WithValues("patch", clusterClassPatch.Name)
+		ctx := ctrl.LoggerInto(ctx, log)
 
-		log.V(5).Infof("Validating topology")
+		log.V(5).Info("Validating topology")
 
 		validator := external.NewValidator(e.runtimeClient, &clusterClassPatch)
@@ -141,7 +143,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 		}
 
 		// Use patched templates to update the desired state objects.
-		log.V(5).Infof("Applying patched templates to desired state")
+		log.V(5).Info("Applying patched templates to desired state")
 		if err := updateDesiredState(ctx, req, blueprint, desired); err != nil {
 			return errors.Wrapf(err, "failed to apply patches to desired state")
 		}
@@ -266,8 +268,8 @@ func createRequest(blueprint *scope.ClusterBlueprint, desired *scope.ClusterStat
 		WithHolder(desired.Cluster, clusterv1.GroupVersion.WithKind("Cluster"), "spec.infrastructureRef").
 		Build()
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to prepare InfrastructureCluster template %s for patching",
-			tlog.KObj{Obj: blueprint.InfrastructureClusterTemplate})
+		return nil, errors.Wrapf(err, "failed to prepare %s %s for patching",
+			blueprint.InfrastructureClusterTemplate.GetKind(), klog.KObj(blueprint.InfrastructureClusterTemplate))
 	}
 	req.Items = append(req.Items, *t)
 
@@ -276,8 +278,8 @@ func createRequest(blueprint *scope.ClusterBlueprint, desired *scope.ClusterStat
 		WithHolder(desired.Cluster, clusterv1.GroupVersion.WithKind("Cluster"), "spec.controlPlaneRef").
 		Build()
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to prepare ControlPlane template %s for patching",
-			tlog.KObj{Obj: blueprint.ControlPlane.Template})
+		return nil, errors.Wrapf(err, "failed to prepare %s %s for patching",
+			blueprint.ControlPlane.Template.GetKind(), klog.KObj(blueprint.ControlPlane.Template))
 	}
 	req.Items = append(req.Items, *t)
 
@@ -288,8 +290,8 @@ func createRequest(blueprint *scope.ClusterBlueprint, desired *scope.ClusterStat
 			WithHolder(desired.ControlPlane.Object, desired.ControlPlane.Object.GroupVersionKind(), strings.Join(contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(), ".")).
 			Build()
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to prepare ControlPlane's machine template %s for patching",
-				tlog.KObj{Obj: blueprint.ControlPlane.InfrastructureMachineTemplate})
+			return nil, errors.Wrapf(err, "failed to prepare ControlPlane's %s %s for patching",
+				blueprint.ControlPlane.InfrastructureMachineTemplate.GetKind(), klog.KObj(blueprint.ControlPlane.InfrastructureMachineTemplate))
 		}
 		req.Items = append(req.Items, *t)
 	}
@@ -318,8 +320,8 @@ func createRequest(blueprint *scope.ClusterBlueprint, desired *scope.ClusterStat
 			WithHolder(md.Object, clusterv1.GroupVersion.WithKind("MachineDeployment"), "spec.template.spec.bootstrap.configRef").
 			Build()
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to prepare BootstrapConfig template %s for MachineDeployment topology %s for patching",
-				tlog.KObj{Obj: mdClass.BootstrapTemplate}, mdTopologyName)
+			return nil, errors.Wrapf(err, "failed to prepare %s %s for MachineDeployment topology %s for patching",
+				mdClass.BootstrapTemplate.GetKind(), klog.KObj(mdClass.BootstrapTemplate), mdTopologyName)
 		}
 		req.Items = append(req.Items, *t)
 
@@ -328,8 +330,8 @@ func createRequest(blueprint *scope.ClusterBlueprint, desired *scope.ClusterStat
 			WithHolder(md.Object, clusterv1.GroupVersion.WithKind("MachineDeployment"), "spec.template.spec.infrastructureRef").
 			Build()
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to prepare InfrastructureMachine template %s for MachineDeployment topology %s for patching",
-				tlog.KObj{Obj: mdClass.InfrastructureMachineTemplate}, mdTopologyName)
+			return nil, errors.Wrapf(err, "failed to prepare %s %s for MachineDeployment topology %s for patching",
+				mdClass.InfrastructureMachineTemplate.GetKind(), klog.KObj(mdClass.InfrastructureMachineTemplate), mdTopologyName)
 		}
 		req.Items = append(req.Items, *t)
 	}
@@ -358,8 +360,8 @@ func createRequest(blueprint *scope.ClusterBlueprint, desired *scope.ClusterStat
 			WithHolder(mp.Object, expv1.GroupVersion.WithKind("MachinePool"), "spec.template.spec.bootstrap.configRef").
 			Build()
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to prepare BootstrapConfig template %s for MachinePool topology %s for patching",
-				tlog.KObj{Obj: mpClass.BootstrapTemplate}, mpTopologyName)
+			return nil, errors.Wrapf(err, "failed to prepare %s %s for MachinePool topology %s for patching",
+				mpClass.BootstrapTemplate.GetKind(), klog.KObj(mpClass.BootstrapTemplate), mpTopologyName)
 		}
 		req.Items = append(req.Items, *t)
 
@@ -368,8 +370,8 @@ func createRequest(blueprint *scope.ClusterBlueprint, desired *scope.ClusterStat
 			WithHolder(mp.Object, expv1.GroupVersion.WithKind("MachinePool"), "spec.template.spec.infrastructureRef").
 			Build()
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to prepare InfrastructureMachinePoolTemplate %s for MachinePool topology %s for patching",
-				tlog.KObj{Obj: mpClass.InfrastructureMachinePoolTemplate}, mpTopologyName)
+			return nil, errors.Wrapf(err, "failed to prepare %s %s for MachinePool topology %s for patching",
+				mpClass.InfrastructureMachinePoolTemplate.GetKind(), klog.KObj(mpClass.InfrastructureMachinePoolTemplate), mpTopologyName)
 		}
 		req.Items = append(req.Items, *t)
 	}
@@ -431,11 +433,11 @@ func applyPatchesToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatc
 }
 
 func applyPatchToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatchesRequest, patch runtimehooksv1.GeneratePatchesResponseItem) (reterr error) {
-	log := tlog.LoggerFrom(ctx).WithValues("uid", patch.UID)
+	log := ctrl.LoggerFrom(ctx).WithValues("uid", patch.UID)
 
 	defer func() {
 		if r := recover(); r != nil {
-			log.Infof("Observed a panic when applying patch: %v\n%s", r, string(debug.Stack()))
+			log.Info(fmt.Sprintf("Observed a panic when applying patch: %v\n%s", r, string(debug.Stack())))
 			reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("observed a panic when applying patch: %v", r)})
 		}
 	}()
@@ -445,7 +447,7 @@ func applyPatchToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatche
 	// If a patch doesn't have a corresponding request item, the patch is invalid.
 	if requestItem == nil {
-		log.Infof("Unable to find corresponding request item with uid %q for the patch", patch.UID)
+		log.Info(fmt.Sprintf("Unable to find corresponding request item with uid %q for the patch", patch.UID))
 		return errors.Errorf("unable to find corresponding request item for patch")
 	}
 
@@ -455,23 +457,23 @@ func applyPatchToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatche
 	switch patch.PatchType {
 	case runtimehooksv1.JSONPatchType:
-		log.V(5).Infof("Accumulating JSON patch: %s", string(patch.Patch))
+		log.V(5).Info("Accumulating JSON patch", "patch", string(patch.Patch))
 		jsonPatch, err := jsonpatch.DecodePatch(patch.Patch)
 		if err != nil {
-			log.Infof("Failed to apply patch with uid %q: error decoding json patch (RFC6902): %s: %s", requestItem.UID, string(patch.Patch), err)
+			log.Error(err, fmt.Sprintf("Failed to apply patch with uid %q: error decoding json patch (RFC6902)", requestItem.UID), "patch", string(patch.Patch))
 			return errors.Wrap(err, "failed to apply patch: error decoding json patch (RFC6902)")
 		}
 
 		patchedTemplate, err = jsonPatch.Apply(requestItem.Object.Raw)
 		if err != nil {
-			log.Infof("Failed to apply patch with uid %q: error applying json patch (RFC6902): %s: %s", requestItem.UID, string(patch.Patch), err)
+			log.Error(err, fmt.Sprintf("Failed to apply patch with uid %q: error applying json patch (RFC6902)", requestItem.UID), "patch", string(patch.Patch))
 			return errors.Wrap(err, "failed to apply patch: error applying json patch (RFC6902)")
 		}
 	case runtimehooksv1.JSONMergePatchType:
-		log.V(5).Infof("Accumulating JSON merge patch: %s", string(patch.Patch))
+		log.V(5).Info("Accumulating JSON merge patch", "patch", string(patch.Patch))
 		patchedTemplate, err = jsonpatch.MergePatch(requestItem.Object.Raw, patch.Patch)
 		if err != nil {
-			log.Infof("Failed to apply patch with uid %q: error applying json merge patch (RFC7386): %s: %s", requestItem.UID, string(patch.Patch), err)
+			log.Error(err, fmt.Sprintf("Failed to apply patch with uid %q: error applying json merge patch (RFC7386)", requestItem.UID), "patch", string(patch.Patch))
 			return errors.Wrap(err, "failed to apply patch: error applying json merge patch (RFC7386)")
 		}
 	}
@@ -479,7 +481,7 @@ func applyPatchToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatche
 	// Overwrite the spec of template.Template with the spec of the patchedTemplate,
 	// to ensure that we only pick up changes to the spec.
 	if err := patchTemplateSpec(&requestItem.Object, patchedTemplate); err != nil {
-		log.Infof("Failed to apply patch to template %s: %s", requestItem.UID, err)
+		log.Error(err, fmt.Sprintf("Failed to apply patch to template with uid %q", requestItem.UID))
 		return errors.Wrap(err, "failed to apply patch to template")
 	}
 
diff --git a/internal/controllers/topology/cluster/patches/patch.go b/internal/controllers/topology/cluster/patches/patch.go
index c6a4b7811171..5d92b8d6a59c 100644
--- a/internal/controllers/topology/cluster/patches/patch.go
+++ b/internal/controllers/topology/cluster/patches/patch.go
@@ -20,15 +20,17 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"strings"
 
 	jsonpatch "github.com/evanphx/json-patch/v5"
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
 
 	"sigs.k8s.io/cluster-api/internal/contract"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 )
 
 // PatchOption represents an option for the patchObject and patchTemplate funcs.
@@ -77,7 +79,7 @@ func patchTemplate(ctx context.Context, template, modifiedTemplate *unstructured
 // patchUnstructured overwrites original.destSpecPath with modified.srcSpecPath.
 // NOTE: Original won't be changed at all, if there is no diff.
 func patchUnstructured(ctx context.Context, original, modified *unstructured.Unstructured, srcSpecPath, destSpecPath string, opts ...PatchOption) error {
-	log := tlog.LoggerFrom(ctx)
+	log := ctrl.LoggerFrom(ctx)
 
 	patchOptions := &PatchOptions{}
 	patchOptions.ApplyOptions(opts)
@@ -93,13 +95,13 @@ func patchUnstructured(ctx context.Context, original, modified *unstructured.Uns
 		destSpecPath:     destSpecPath,
 		fieldsToPreserve: patchOptions.preserveFields,
 	}); err != nil {
-		return errors.Wrapf(err, "failed to apply patch to %s", tlog.KObj{Obj: original})
+		return errors.Wrapf(err, "failed to apply patch to %s %s", original.GetKind(), klog.KObj(original))
 	}
 
 	// Calculate diff.
 	diff, err := calculateDiff(original, patched)
 	if err != nil {
-		return errors.Wrapf(err, "failed to apply patch to %s: failed to calculate diff", tlog.KObj{Obj: original})
+		return errors.Wrapf(err, "failed to apply patch to %s %s: failed to calculate diff", original.GetKind(), klog.KObj(original))
 	}
 
 	// Return if there is no diff.
@@ -108,7 +110,7 @@ func patchUnstructured(ctx context.Context, original, modified *unstructured.Uns
 	}
 
 	// Log the delta between the object before and after applying the accumulated patches.
-	log.V(4).WithObject(original).Infof("Applying accumulated patches to desired state: %s", string(diff))
+	log.V(4).WithValues(original.GetKind(), klog.KObj(original)).Info(fmt.Sprintf("Applying accumulated patches to desired state of %s", original.GetKind()), "patch", string(diff))
 
 	// Overwrite original.
 	*original = *patched
@@ -185,7 +187,7 @@ func copySpec(in copySpecInput) error {
 			// Continue if the field does not exist in src. fieldsToPreserve don't have to exist.
 			continue
 		} else if err != nil {
-			return errors.Wrapf(err, "failed to get field %q from %s", strings.Join(field, "."), tlog.KObj{Obj: in.dest})
+			return errors.Wrapf(err, "failed to get field %q from %s %s", strings.Join(field, "."), in.dest.GetKind(), klog.KObj(in.dest))
 		}
 		preservedFields[strings.Join(field, ".")] = value
 	}
@@ -193,20 +195,20 @@ func copySpec(in copySpecInput) error {
 	// Get spec from src.
 	srcSpec, found, err := unstructured.NestedFieldNoCopy(in.src.Object, strings.Split(in.srcSpecPath, ".")...)
 	if !found {
-		return errors.Errorf("missing field %q in %s", in.srcSpecPath, tlog.KObj{Obj: in.src})
+		return errors.Errorf("missing field %q in %s %s", in.srcSpecPath, in.src.GetKind(), klog.KObj(in.src))
 	} else if err != nil {
-		return errors.Wrapf(err, "failed to get field %q from %s", in.srcSpecPath, tlog.KObj{Obj: in.src})
+		return errors.Wrapf(err, "failed to get field %q from %s %s", in.srcSpecPath, in.src.GetKind(), klog.KObj(in.src))
 	}
 
 	// Set spec in dest.
 	if err := unstructured.SetNestedField(in.dest.Object, srcSpec, strings.Split(in.destSpecPath, ".")...); err != nil {
-		return errors.Wrapf(err, "failed to set field %q on %s", in.destSpecPath, tlog.KObj{Obj: in.dest})
+		return errors.Wrapf(err, "failed to set field %q on %s %s", in.destSpecPath, in.dest.GetKind(), klog.KObj(in.dest))
 	}
 
 	// Restore preserved fields.
 	for path, value := range preservedFields {
 		if err := unstructured.SetNestedField(in.dest.Object, value, strings.Split(path, ".")...); err != nil {
-			return errors.Wrapf(err, "failed to set field %q on %s", path, tlog.KObj{Obj: in.dest})
+			return errors.Wrapf(err, "failed to set field %q on %s %s", path, in.dest.GetKind(), klog.KObj(in.dest))
 		}
 	}
 	return nil
diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go
index cd6de9a44299..f85ac3b544e3 100644
--- a/internal/controllers/topology/cluster/reconcile_state.go
+++ b/internal/controllers/topology/cluster/reconcile_state.go
@@ -43,7 +43,6 @@ import (
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
 	"sigs.k8s.io/cluster-api/internal/hooks"
-	tlog "sigs.k8s.io/cluster-api/internal/log"
 	"sigs.k8s.io/cluster-api/internal/topology/check"
 	"sigs.k8s.io/cluster-api/internal/topology/clustershim"
 	topologynames "sigs.k8s.io/cluster-api/internal/topology/names"
@@ -62,8 +61,8 @@ const (
 // the entire reconcile operation will fail. This might be improved in the future if support for reconciling
 // subset of a topology will be implemented.
 func (r *Reconciler) reconcileState(ctx context.Context, s *scope.Scope) error {
-	log := tlog.LoggerFrom(ctx)
-	log.Infof("Reconciling state for topology owned objects")
+	log := ctrl.LoggerFrom(ctx)
+	log.Info("Reconciling state for topology owned objects")
 
 	// Reconcile the Cluster shim, a temporary object used a mean to collect
 	// objects/templates that can be orphaned in case of errors during the
@@ -269,7 +268,8 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope
 
 // reconcileInfrastructureCluster reconciles the desired state of the InfrastructureCluster object.
 func (r *Reconciler) reconcileInfrastructureCluster(ctx context.Context, s *scope.Scope) (bool, error) {
-	ctx, _ = tlog.LoggerFrom(ctx).WithObject(s.Desired.InfrastructureCluster).Into(ctx)
+	log := ctrl.LoggerFrom(ctx).WithValues(s.Desired.InfrastructureCluster.GetKind(), klog.KObj(s.Desired.InfrastructureCluster))
+	ctx = ctrl.LoggerInto(ctx, log)
 
 	ignorePaths, err := contract.InfrastructureCluster().IgnorePaths(s.Desired.InfrastructureCluster)
 	if err != nil {
@@ -306,11 +306,12 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope)
 	// If the clusterClass mandates the controlPlane has infrastructureMachines, reconcile it.
 	infrastructureMachineCleanupFunc := func() {}
 	if s.Blueprint.HasControlPlaneInfrastructureMachine() {
-		ctx, _ := tlog.LoggerFrom(ctx).WithObject(s.Desired.ControlPlane.InfrastructureMachineTemplate).Into(ctx)
+		log := ctrl.LoggerFrom(ctx).WithValues(s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind(), klog.KObj(s.Desired.ControlPlane.InfrastructureMachineTemplate))
+		ctx := ctrl.LoggerInto(ctx, log)
 
 		cpInfraRef, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(s.Desired.ControlPlane.Object)
 		if err != nil {
-			return false, errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: s.Desired.ControlPlane.InfrastructureMachineTemplate})
+			return false, errors.Wrapf(err, "failed to reconcile %s %s", s.Desired.ControlPlane.InfrastructureMachineTemplate.GetKind(), klog.KObj(s.Desired.ControlPlane.InfrastructureMachineTemplate))
 		}
 
 		// Create or update the MachineInfrastructureTemplate of the control plane.
@@ -331,10 +332,7 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope)
 				// Best effort cleanup of the InfrastructureMachineTemplate;
 				// If this fails, the object will be garbage collected when the cluster is deleted.
 				if err := r.Client.Delete(ctx, s.Desired.ControlPlane.InfrastructureMachineTemplate); err != nil {
-					log := tlog.LoggerFrom(ctx).
-						WithValues(s.Desired.ControlPlane.InfrastructureMachineTemplate.GetObjectKind().GroupVersionKind().Kind, s.Desired.ControlPlane.InfrastructureMachineTemplate.GetName()).
-						WithValues("err", err.Error())
-					log.Infof("WARNING! Failed to cleanup InfrastructureMachineTemplate for control plane while handling creation or update error. The object will be garbage collected when the cluster is deleted.")
+					log.Error(err, "WARNING! Failed to cleanup InfrastructureMachineTemplate for control plane while handling creation or update error. The object will be garbage collected when the cluster is deleted.")
 				}
 			}
@@ -344,12 +342,13 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope)
 		if err != nil {
 			// Best effort cleanup of the InfrastructureMachineTemplate (only on creation).
 			infrastructureMachineCleanupFunc()
-			return false, errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: s.Desired.ControlPlane.Object})
+			return false, errors.Wrapf(err, "failed to reconcile %s %s", s.Desired.ControlPlane.Object.GetKind(), klog.KObj(s.Desired.ControlPlane.Object))
 		}
 	}
 
 	// Create or update the ControlPlaneObject for the ControlPlaneState.
-	ctx, _ = tlog.LoggerFrom(ctx).WithObject(s.Desired.ControlPlane.Object).Into(ctx)
+	log := ctrl.LoggerFrom(ctx).WithValues(s.Desired.ControlPlane.Object.GetKind(), klog.KObj(s.Desired.ControlPlane.Object))
+	ctx = ctrl.LoggerInto(ctx, log)
 	created, err := r.reconcileReferencedObject(ctx, reconcileReferencedObjectInput{
 		cluster: s.Current.Cluster,
 		current: s.Current.ControlPlane.Object,
@@ -368,9 +367,11 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope)
 	if s.Blueprint.HasControlPlaneInfrastructureMachine() && s.Current.ControlPlane.InfrastructureMachineTemplate != nil {
 		if s.Current.ControlPlane.InfrastructureMachineTemplate.GetName() != s.Desired.ControlPlane.InfrastructureMachineTemplate.GetName() {
 			if err := r.Client.Delete(ctx, s.Current.ControlPlane.InfrastructureMachineTemplate); err != nil {
-				return created, errors.Wrapf(err, "failed to delete oldinfrastructure machine template %s of control plane %s",
-					tlog.KObj{Obj: s.Current.ControlPlane.InfrastructureMachineTemplate},
-					tlog.KObj{Obj: s.Current.ControlPlane.Object},
+				return created, errors.Wrapf(err, "failed to delete old %s %s of %s %s",
+					s.Current.ControlPlane.InfrastructureMachineTemplate.GetKind(),
+					klog.KObj(s.Current.ControlPlane.InfrastructureMachineTemplate),
+					s.Current.ControlPlane.Object.GetKind(),
+					klog.KObj(s.Current.ControlPlane.Object),
 				)
 			}
 		}
@@ -382,54 +383,55 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope)
 
 // reconcileMachineHealthCheck creates, updates, deletes or leaves untouched a MachineHealthCheck depending on the difference between the
 // current state and the desired state.
 func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, desired *clusterv1.MachineHealthCheck) error {
-	log := tlog.LoggerFrom(ctx)
+	log := ctrl.LoggerFrom(ctx)
 
 	// If a current MachineHealthCheck doesn't exist but there is a desired MachineHealthCheck attempt to create.
 	if current == nil && desired != nil {
-		log.Infof("Creating %s", tlog.KObj{Obj: desired})
+		log.Info("Creating MachineHealthCheck", "MachineHealthCheck", klog.KObj(desired))
 		helper, err := r.patchHelperFactory(ctx, nil, desired)
 		if err != nil {
-			return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: desired})
+			return errors.Wrapf(err, "failed to create patch helper for MachineHealthCheck %s", klog.KObj(desired))
 		}
 		if err := helper.Patch(ctx); err != nil {
-			return errors.Wrapf(err, "failed to create %s", tlog.KObj{Obj: desired})
+			return errors.Wrapf(err, "failed to create MachineHealthCheck %s", klog.KObj(desired))
 		}
-		r.recorder.Eventf(desired, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: desired})
+		r.recorder.Eventf(desired, corev1.EventTypeNormal, createEventReason, "Created MachineHealthCheck %q", klog.KObj(desired))
 		return nil
 	}
 
 	// If a current MachineHealthCheck exists but there is no desired MachineHealthCheck attempt to delete.
 	if current != nil && desired == nil {
-		log.Infof("Deleting %s", tlog.KObj{Obj: current})
+		log.Info("Deleting MachineHealthCheck", "MachineHealthCheck", klog.KObj(current))
 		if err := r.Client.Delete(ctx, current); err != nil {
 			// If the object to be deleted is not found don't throw an error.
 			if !apierrors.IsNotFound(err) {
-				return errors.Wrapf(err, "failed to delete %s", tlog.KObj{Obj: current})
+				return errors.Wrapf(err, "failed to delete MachineHealthCheck %s", klog.KObj(current))
 			}
 		}
-		r.recorder.Eventf(current, corev1.EventTypeNormal, deleteEventReason, "Deleted %q", tlog.KObj{Obj: current})
+		r.recorder.Eventf(current, corev1.EventTypeNormal, deleteEventReason, "Deleted MachineHealthCheck %q", klog.KObj(current))
 		return nil
 	}
 
-	ctx, log = log.WithObject(current).Into(ctx)
+	log = log.WithValues("MachineHealthCheck", klog.KObj(current))
+	ctx = ctrl.LoggerInto(ctx, log)
 
 	// Check differences between current and desired MachineHealthChecks, and patch if required.
 	// NOTE: we want to be authoritative on the entire spec because the users are
 	// expected to change MHC fields from the ClusterClass only.
 	patchHelper, err := r.patchHelperFactory(ctx, current, desired)
 	if err != nil {
-		return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: current})
+		return errors.Wrapf(err, "failed to create patch helper for MachineHealthCheck %s", klog.KObj(current))
 	}
 
 	if !patchHelper.HasChanges() {
-		log.V(3).Infof("No changes for %s", tlog.KObj{Obj: current})
+		log.V(3).Info("No changes for MachineHealthCheck")
 		return nil
 	}
 
-	log.Infof("Patching %s", tlog.KObj{Obj: current})
+	log.Info("Patching MachineHealthCheck")
 	if err := patchHelper.Patch(ctx); err != nil {
-		return errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: current})
+		return errors.Wrapf(err, "failed to patch MachineHealthCheck %s", klog.KObj(current))
 	}
-	r.recorder.Eventf(current, corev1.EventTypeNormal, updateEventReason, "Updated %q", tlog.KObj{Obj: current})
+	r.recorder.Eventf(current, corev1.EventTypeNormal, updateEventReason, "Updated MachineHealthCheck %q", klog.KObj(current))
 	return nil
 }
 
@@ -438,28 +440,28 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
 // most specifically, after a Cluster is created it is assumed that the reference to the InfrastructureCluster /
 // ControlPlane objects should never change (only the content of the objects can change).
func (r *Reconciler) reconcileCluster(ctx context.Context, s *scope.Scope) error { - ctx, log := tlog.LoggerFrom(ctx).WithObject(s.Desired.Cluster).Into(ctx) + log := ctrl.LoggerFrom(ctx) // Check differences between current and desired state, and eventually patch the current object. patchHelper, err := r.patchHelperFactory(ctx, s.Current.Cluster, s.Desired.Cluster) if err != nil { - return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: s.Current.Cluster}) + return errors.Wrapf(err, "failed to create patch helper for Cluster %s", klog.KObj(s.Current.Cluster)) } if !patchHelper.HasChanges() { - log.V(3).Infof("No changes for %s", tlog.KObj{Obj: s.Current.Cluster}) + log.V(3).Info("No changes for Cluster") return nil } changes := patchHelper.Changes() if len(changes) == 0 { - log.Infof("Patching %s", tlog.KObj{Obj: s.Current.Cluster}) + log.Info("Patching Cluster") } else { - log.Infof("Patching %s, diff: %s", tlog.KObj{Obj: s.Current.Cluster}, changes) + log.Info("Patching Cluster", "diff", string(changes)) } if err := patchHelper.Patch(ctx); err != nil { - return errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: s.Current.Cluster}) + return errors.Wrapf(err, "failed to patch Cluster %s", klog.KObj(s.Current.Cluster)) } - r.recorder.Eventf(s.Current.Cluster, corev1.EventTypeNormal, updateEventReason, "Updated %q", tlog.KObj{Obj: s.Current.Cluster}) + r.recorder.Eventf(s.Current.Cluster, corev1.EventTypeNormal, updateEventReason, "Updated Cluster %q", klog.KObj(s.Current.Cluster)) // Wait until Cluster is updated in the cache. // Note: We have to do this because otherwise using a cached client in the Reconcile func could @@ -476,7 +478,7 @@ func (r *Reconciler) reconcileCluster(ctx context.Context, s *scope.Scope) error return s.Current.Cluster.GetResourceVersion() != cachedCluster.GetResourceVersion(), nil }) if err != nil { - return errors.Wrapf(err, "failed waiting for Cluster %s to be updated in the cache after patch", tlog.KObj{Obj: s.Current.Cluster}) + return errors.Wrapf(err, "failed waiting for Cluster %s to be updated in the cache after patch", klog.KObj(s.Current.Cluster)) } return nil } @@ -500,8 +502,9 @@ func (r *Reconciler) reconcileMachineDeployments(ctx context.Context, s *scope.S // Skip the MD creation if the MD already exists. 
if currentMDTopologyNames.Has(mdTopologyName) { - log := tlog.LoggerFrom(ctx).WithMachineDeployment(md.Object) - log.V(3).Infof(fmt.Sprintf("Skipping creation of MachineDeployment %s because MachineDeployment for topology %s already exists (only considered creation because of stale cache)", tlog.KObj{Obj: md.Object}, mdTopologyName)) + log := ctrl.LoggerFrom(ctx).WithValues("MachineDeployment", klog.KObj(md.Object), + "machineDeploymentTopology", mdTopologyName) + log.V(3).Info(fmt.Sprintf("Skipping creation of MachineDeployment, because MachineDeployment for topology %s already exists (only considered creation because of stale cache)", mdTopologyName)) continue } @@ -572,9 +575,13 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, s *scope.Scope return nil } - log := tlog.LoggerFrom(ctx).WithMachineDeployment(md.Object) + log := ctrl.LoggerFrom(ctx).WithValues( + "MachineDeployment", klog.KObj(md.Object), + "machineDeploymentTopology", mdTopologyName) + ctx = ctrl.LoggerInto(ctx, log) cluster := s.Current.Cluster - infraCtx, _ := log.WithObject(md.InfrastructureMachineTemplate).Into(ctx) + infraLog := log.WithValues(md.InfrastructureMachineTemplate.GetKind(), klog.KObj(md.InfrastructureMachineTemplate)) + infraCtx := ctrl.LoggerInto(ctx, infraLog) infrastructureMachineCleanupFunc := func() {} createdInfra, err := r.reconcileReferencedTemplate(infraCtx, reconcileReferencedTemplateInput{ cluster: cluster, @@ -589,15 +596,13 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, s *scope.Scope // Best effort cleanup of the InfrastructureMachineTemplate; // If this fails, the object will be garbage collected when the cluster is deleted. if err := r.Client.Delete(ctx, md.InfrastructureMachineTemplate); err != nil { - log := tlog.LoggerFrom(ctx). - WithValues(md.InfrastructureMachineTemplate.GetObjectKind().GroupVersionKind().Kind, md.InfrastructureMachineTemplate.GetName()). - WithValues("err", err.Error()) - log.Infof("WARNING! Failed to cleanup InfrastructureMachineTemplate for MachineDeployment while handling creation error. The object will be garbage collected when the cluster is deleted.") + infraLog.Error(err, "WARNING! Failed to cleanup InfrastructureMachineTemplate for MachineDeployment while handling creation error. The object will be garbage collected when the cluster is deleted.") } } } - bootstrapCtx, _ := log.WithObject(md.BootstrapTemplate).Into(ctx) + bootstrapLog := log.WithValues(md.BootstrapTemplate.GetKind(), klog.KObj(md.BootstrapTemplate)) + bootstrapCtx := ctrl.LoggerInto(ctx, bootstrapLog) bootstrapCleanupFunc := func() {} createdBootstrap, err := r.reconcileReferencedTemplate(bootstrapCtx, reconcileReferencedTemplateInput{ cluster: cluster, @@ -614,16 +619,12 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, s *scope.Scope // Best effort cleanup of the BootstrapTemplate; // If this fails, the object will be garbage collected when the cluster is deleted. if err := r.Client.Delete(ctx, md.BootstrapTemplate); err != nil { - log := tlog.LoggerFrom(ctx). - WithValues(md.BootstrapTemplate.GetObjectKind().GroupVersionKind().Kind, md.BootstrapTemplate.GetName()). - WithValues("err", err.Error()) - log.Infof("WARNING! Failed to cleanup BootstrapTemplate for MachineDeployment while handling creation error. The object will be garbage collected when the cluster is deleted.") + bootstrapLog.Error(err, "WARNING! Failed to cleanup BootstrapTemplate for MachineDeployment while handling creation error. 
The object will be garbage collected when the cluster is deleted.") } } } - log = log.WithObject(md.Object) - log.Infof(fmt.Sprintf("Creating %s", tlog.KObj{Obj: md.Object})) + log.Info("Creating MachineDeployment") helper, err := r.patchHelperFactory(ctx, nil, md.Object) if err != nil { // Best effort cleanup of the InfrastructureMachineTemplate & BootstrapTemplate (only on creation). @@ -637,7 +638,7 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, s *scope.Scope bootstrapCleanupFunc() return createErrorWithoutObjectName(ctx, err, md.Object) } - r.recorder.Eventf(cluster, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: md.Object}) + r.recorder.Eventf(cluster, corev1.EventTypeNormal, createEventReason, "Created MachineDeployment %q", klog.KObj(md.Object)) // Wait until MachineDeployment is visible in the cache. // Note: We have to do this because otherwise using a cached client in current state could @@ -667,7 +668,8 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, s *scope.Scope // updateMachineDeployment updates a MachineDeployment. Also rotates the corresponding Templates if necessary. func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope, mdTopologyName string, currentMD, desiredMD *scope.MachineDeploymentState) error { - log := tlog.LoggerFrom(ctx).WithMachineDeployment(desiredMD.Object) + log := ctrl.LoggerFrom(ctx).WithValues("MachineDeployment", klog.KObj(desiredMD.Object), + "machineDeploymentTopology", mdTopologyName) // Patch MachineHealthCheck for the MachineDeployment. // MHC changes are not Kubernetes version dependent, therefore proceed with MHC reconciliation @@ -686,7 +688,8 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope } cluster := s.Current.Cluster - infraCtx, _ := log.WithObject(desiredMD.InfrastructureMachineTemplate).Into(ctx) + infraLog := log.WithValues(desiredMD.InfrastructureMachineTemplate.GetKind(), klog.KObj(desiredMD.InfrastructureMachineTemplate)) + infraCtx := ctrl.LoggerInto(ctx, infraLog) infrastructureMachineCleanupFunc := func() {} createdInfra, err := r.reconcileReferencedTemplate(infraCtx, reconcileReferencedTemplateInput{ cluster: cluster, @@ -697,7 +700,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope compatibilityChecker: check.ObjectsAreCompatible, }) if err != nil { - return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMD.Object}) + return errors.Wrapf(err, "failed to reconcile MachineDeployment %s", klog.KObj(currentMD.Object)) } if createdInfra { @@ -705,15 +708,13 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope // Best effort cleanup of the InfrastructureMachineTemplate; // If this fails, the object will be garbage collected when the cluster is deleted. if err := r.Client.Delete(ctx, desiredMD.InfrastructureMachineTemplate); err != nil { - log := tlog.LoggerFrom(ctx). - WithValues(desiredMD.InfrastructureMachineTemplate.GetObjectKind().GroupVersionKind().Kind, desiredMD.InfrastructureMachineTemplate.GetName()). - WithValues("err", err.Error()) - log.Infof("WARNING! Failed to cleanup InfrastructureMachineTemplate for MachineDeployment while handling update error. The object will be garbage collected when the cluster is deleted.") + infraLog.Error(err, "WARNING! Failed to cleanup InfrastructureMachineTemplate for MachineDeployment while handling update error. 
The object will be garbage collected when the cluster is deleted.") } } } - bootstrapCtx, _ := log.WithObject(desiredMD.BootstrapTemplate).Into(ctx) + bootstrapLog := log.WithValues(desiredMD.BootstrapTemplate.GetKind(), klog.KObj(desiredMD.BootstrapTemplate)) + bootstrapCtx := ctrl.LoggerInto(ctx, bootstrapLog) bootstrapCleanupFunc := func() {} createdBootstrap, err := r.reconcileReferencedTemplate(bootstrapCtx, reconcileReferencedTemplateInput{ cluster: cluster, @@ -726,7 +727,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope if err != nil { // Best effort cleanup of the InfrastructureMachineTemplate (only on template rotation). infrastructureMachineCleanupFunc() - return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMD.Object}) + return errors.Wrapf(err, "failed to reconcile MachineDeployment %s", klog.KObj(currentMD.Object)) } if createdBootstrap { @@ -734,41 +735,37 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope // Best effort cleanup of the BootstrapTemplate; // If this fails, the object will be garbage collected when the cluster is deleted. if err := r.Client.Delete(ctx, desiredMD.BootstrapTemplate); err != nil { - log := tlog.LoggerFrom(ctx). - WithValues(desiredMD.BootstrapTemplate.GetObjectKind().GroupVersionKind().Kind, desiredMD.BootstrapTemplate.GetName()). - WithValues("err", err.Error()) - log.Infof("WARNING! Failed to cleanup BootstrapTemplate for MachineDeployment while handling update error. The object will be garbage collected when the cluster is deleted.") + bootstrapLog.Error(err, "WARNING! Failed to cleanup BootstrapTemplate for MachineDeployment while handling update error. The object will be garbage collected when the cluster is deleted.") } } } // Check differences between current and desired MachineDeployment, and eventually patch the current object. - log = log.WithObject(desiredMD.Object) patchHelper, err := r.patchHelperFactory(ctx, currentMD.Object, desiredMD.Object) if err != nil { // Best effort cleanup of the InfrastructureMachineTemplate & BootstrapTemplate (only on template rotation). infrastructureMachineCleanupFunc() bootstrapCleanupFunc() - return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: currentMD.Object}) + return errors.Wrapf(err, "failed to create patch helper for MachineDeployment %s", klog.KObj(currentMD.Object)) } if !patchHelper.HasChanges() { - log.V(3).Infof("No changes for %s", tlog.KObj{Obj: currentMD.Object}) + log.V(3).Info("No changes for MachineDeployment") return nil } changes := patchHelper.Changes() if len(changes) == 0 { - log.Infof("Patching %s", tlog.KObj{Obj: currentMD.Object}) + log.Info("Patching MachineDeployment") } else { - log.Infof("Patching %s, diff: %s", tlog.KObj{Obj: currentMD.Object}, changes) + log.Info("Patching MachineDeployment", "diff", string(changes)) } if err := patchHelper.Patch(ctx); err != nil { // Best effort cleanup of the InfrastructureMachineTemplate & BootstrapTemplate (only on template rotation). 
infrastructureMachineCleanupFunc() bootstrapCleanupFunc() - return errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: currentMD.Object}) + return errors.Wrapf(err, "failed to patch MachineDeployment %s", klog.KObj(currentMD.Object)) } - r.recorder.Eventf(cluster, corev1.EventTypeNormal, updateEventReason, "Updated %q%s", tlog.KObj{Obj: currentMD.Object}, logMachineDeploymentVersionChange(currentMD.Object, desiredMD.Object)) + r.recorder.Eventf(cluster, corev1.EventTypeNormal, updateEventReason, "Updated MachineDeployment %q%s", klog.KObj(currentMD.Object), logMachineDeploymentVersionChange(currentMD.Object, desiredMD.Object)) // Wait until MachineDeployment is updated in the cache. // Note: We have to do this because otherwise using a cached client in current state could @@ -785,7 +782,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope return currentMD.Object.GetResourceVersion() != cachedMD.GetResourceVersion(), nil }) if err != nil { - return errors.Wrapf(err, "failed waiting for MachineDeployment %s to be updated in the cache after patch", tlog.KObj{Obj: currentMD.Object}) + return errors.Wrapf(err, "failed waiting for MachineDeployment %s to be updated in the cache after patch", klog.KObj(currentMD.Object)) } // We want to call both cleanup functions even if one of them fails to clean up as much as possible. @@ -805,7 +802,9 @@ func logMachineDeploymentVersionChange(current, desired *clusterv1.MachineDeploy // deleteMachineDeployment deletes a MachineDeployment. func (r *Reconciler) deleteMachineDeployment(ctx context.Context, cluster *clusterv1.Cluster, md *scope.MachineDeploymentState) error { - log := tlog.LoggerFrom(ctx).WithMachineDeployment(md.Object).WithObject(md.Object) + log := ctrl.LoggerFrom(ctx).WithValues( + "MachineDeployment", klog.KObj(md.Object), + "machineDeploymentTopology", md.Object.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel]) // delete MachineHealthCheck for the MachineDeployment. if md.MachineHealthCheck != nil { @@ -813,11 +812,11 @@ func (r *Reconciler) deleteMachineDeployment(ctx context.Context, cluster *clust return err } } - log.Infof("Deleting %s", tlog.KObj{Obj: md.Object}) + log.Info("Deleting MachineDeployment") if err := r.Client.Delete(ctx, md.Object); err != nil && !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete %s", tlog.KObj{Obj: md.Object}) + return errors.Wrapf(err, "failed to delete MachineDeployment %s", klog.KObj(md.Object)) } - r.recorder.Eventf(cluster, corev1.EventTypeNormal, deleteEventReason, "Deleted %q", tlog.KObj{Obj: md.Object}) + r.recorder.Eventf(cluster, corev1.EventTypeNormal, deleteEventReason, "Deleted MachineDeployment %q", klog.KObj(md.Object)) return nil } @@ -840,8 +839,9 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope) // Skip the MP creation if the MP already exists. 
if currentMPTopologyNames.Has(mpTopologyName) { - log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object) - log.V(3).Infof(fmt.Sprintf("Skipping creation of MachinePool %s because MachinePool for topology %s already exists (only considered creation because of stale cache)", tlog.KObj{Obj: mp.Object}, mpTopologyName)) + log := ctrl.LoggerFrom(ctx).WithValues("MachinePool", klog.KObj(mp.Object), + "machinePoolTopology", mpTopologyName) + log.V(3).Info(fmt.Sprintf("Skipping creation of MachinePool, because MachinePool for topology %s already exists (only considered creation because of stale cache)", mpTopologyName)) continue } @@ -855,7 +855,7 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope) for _, mpTopologyName := range diff.toUpdate { currentMP := s.Current.MachinePools[mpTopologyName] desiredMP := s.Desired.MachinePools[mpTopologyName] - if err := r.updateMachinePool(ctx, s, currentMP, desiredMP); err != nil { + if err := r.updateMachinePool(ctx, s, mpTopologyName, currentMP, desiredMP); err != nil { return err } } @@ -911,16 +911,20 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp * return nil } - log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object) + log := ctrl.LoggerFrom(ctx).WithValues( + "MachinePool", klog.KObj(mp.Object), + "machinePoolTopology", mpTopologyName) + ctx = ctrl.LoggerInto(ctx, log) cluster := s.Current.Cluster - infraCtx, _ := log.WithObject(mp.InfrastructureMachinePoolObject).Into(ctx) + infraLog := log.WithValues(mp.InfrastructureMachinePoolObject.GetKind(), klog.KObj(mp.InfrastructureMachinePoolObject)) + infraCtx := ctrl.LoggerInto(ctx, infraLog) infrastructureMachineMachinePoolCleanupFunc := func() {} createdInfrastructureMachinePool, err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{ cluster: cluster, desired: mp.InfrastructureMachinePoolObject, }) if err != nil { - return errors.Wrapf(err, "failed to create %s", mp.Object.Kind) + return errors.Wrapf(err, "failed to create MachinePool %s", klog.KObj(mp.Object)) } if createdInfrastructureMachinePool { @@ -928,15 +932,13 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp * // Best effort cleanup of the InfrastructureMachinePool; // If this fails, the object will be garbage collected when the cluster is deleted. if err := r.Client.Delete(ctx, mp.InfrastructureMachinePoolObject); err != nil { - log := tlog.LoggerFrom(ctx). - WithValues(mp.InfrastructureMachinePoolObject.GetObjectKind().GroupVersionKind().Kind, mp.InfrastructureMachinePoolObject.GetName()). - WithValues("err", err.Error()) - log.Infof("WARNING! Failed to cleanup InfrastructureMachinePoolObject for MachinePool while handling creation error. The object will be garbage collected when the cluster is deleted.") + infraLog.Error(err, "WARNING! Failed to cleanup InfrastructureMachinePoolObject for MachinePool while handling creation error. The object will be garbage collected when the cluster is deleted.")
} } } - bootstrapCtx, _ := log.WithObject(mp.BootstrapObject).Into(ctx) + bootstrapLog := log.WithValues(mp.BootstrapObject.GetKind(), klog.KObj(mp.BootstrapObject)) + bootstrapCtx := ctrl.LoggerInto(ctx, bootstrapLog) bootstrapCleanupFunc := func() {} createdBootstrap, err := r.reconcileReferencedObject(bootstrapCtx, reconcileReferencedObjectInput{ cluster: cluster, @@ -945,7 +947,7 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp * if err != nil { // Best effort cleanup of the InfrastructureMachinePool (only on creation). infrastructureMachineMachinePoolCleanupFunc() - return errors.Wrapf(err, "failed to create %s", mp.Object.Kind) + return errors.Wrapf(err, "failed to create MachinePool %s", klog.KObj(mp.Object)) } if createdBootstrap { @@ -953,16 +955,12 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp * // Best effort cleanup of the BootstrapConfig; // If this fails, the object will be garbage collected when the cluster is deleted. if err := r.Client.Delete(ctx, mp.BootstrapObject); err != nil { - log := tlog.LoggerFrom(ctx). - WithValues(mp.BootstrapObject.GetObjectKind().GroupVersionKind().Kind, mp.BootstrapObject.GetName()). - WithValues("err", err.Error()) - log.Infof("WARNING! Failed to cleanup BootstrapObject for MachinePool while handling creation error. The object will be garbage collected when the cluster is deleted.") + bootstrapLog.Error(err, "WARNING! Failed to cleanup BootstrapObject for MachinePool while handling creation error. The object will be garbage collected when the cluster is deleted.") } } } - log = log.WithObject(mp.Object) - log.Infof(fmt.Sprintf("Creating %s", tlog.KObj{Obj: mp.Object})) + log.Info("Creating MachinePool") helper, err := r.patchHelperFactory(ctx, nil, mp.Object) if err != nil { // Best effort cleanup of the InfrastructureMachinePool & BootstrapConfig (only on creation). @@ -976,7 +974,7 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp * bootstrapCleanupFunc() return createErrorWithoutObjectName(ctx, err, mp.Object) } - r.recorder.Eventf(cluster, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: mp.Object}) + r.recorder.Eventf(cluster, corev1.EventTypeNormal, createEventReason, "Created MachinePool %q", klog.KObj(mp.Object)) // Wait until MachinePool is visible in the cache. // Note: We have to do this because otherwise using a cached client in current state could @@ -999,8 +997,9 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp * } // updateMachinePool updates a MachinePool. Also updates the corresponding objects if necessary. -func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, currentMP, desiredMP *scope.MachinePoolState) error { - log := tlog.LoggerFrom(ctx).WithMachinePool(desiredMP.Object) +func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, mpTopologyName string, currentMP, desiredMP *scope.MachinePoolState) error { + log := ctrl.LoggerFrom(ctx).WithValues("MachinePool", klog.KObj(desiredMP.Object), + "machinePoolTopology", mpTopologyName) // Return early if the MachinePool is pending an upgrade.
// Do not reconcile the MachinePool yet to avoid updating the MachinePool while it is still pending a @@ -1010,45 +1009,44 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, curr } cluster := s.Current.Cluster - infraCtx, _ := log.WithObject(desiredMP.InfrastructureMachinePoolObject).Into(ctx) + infraCtx := ctrl.LoggerInto(ctx, log.WithValues(desiredMP.InfrastructureMachinePoolObject.GetKind(), klog.KObj(desiredMP.InfrastructureMachinePoolObject))) if _, err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{ cluster: cluster, current: currentMP.InfrastructureMachinePoolObject, desired: desiredMP.InfrastructureMachinePoolObject, }); err != nil { - return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object}) + return errors.Wrapf(err, "failed to reconcile MachinePool %s", klog.KObj(currentMP.Object)) } - bootstrapCtx, _ := log.WithObject(desiredMP.BootstrapObject).Into(ctx) + bootstrapCtx := ctrl.LoggerInto(ctx, log.WithValues(desiredMP.BootstrapObject.GetKind(), klog.KObj(desiredMP.BootstrapObject))) if _, err := r.reconcileReferencedObject(bootstrapCtx, reconcileReferencedObjectInput{ cluster: cluster, current: currentMP.BootstrapObject, desired: desiredMP.BootstrapObject, }); err != nil { - return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object}) + return errors.Wrapf(err, "failed to reconcile MachinePool %s", klog.KObj(currentMP.Object)) } // Check differences between current and desired MachinePool, and eventually patch the current object. - log = log.WithObject(desiredMP.Object) patchHelper, err := r.patchHelperFactory(ctx, currentMP.Object, desiredMP.Object) if err != nil { - return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: currentMP.Object}) + return errors.Wrapf(err, "failed to create patch helper for MachinePool %s", klog.KObj(currentMP.Object)) } if !patchHelper.HasChanges() { - log.V(3).Infof("No changes for %s", tlog.KObj{Obj: currentMP.Object}) + log.V(3).Info("No changes for MachinePool") return nil } changes := patchHelper.Changes() if len(changes) == 0 { - log.Infof("Patching %s", tlog.KObj{Obj: currentMP.Object}) + log.Info("Patching MachinePool") } else { - log.Infof("Patching %s, diff: %s", tlog.KObj{Obj: currentMP.Object}, changes) + log.Info("Patching MachinePool", "diff", string(changes)) } if err := patchHelper.Patch(ctx); err != nil { - return errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: currentMP.Object}) + return errors.Wrapf(err, "failed to patch MachinePool %s", klog.KObj(currentMP.Object)) } - r.recorder.Eventf(cluster, corev1.EventTypeNormal, updateEventReason, "Updated %q%s", tlog.KObj{Obj: currentMP.Object}, logMachinePoolVersionChange(currentMP.Object, desiredMP.Object)) + r.recorder.Eventf(cluster, corev1.EventTypeNormal, updateEventReason, "Updated MachinePool %q%s", klog.KObj(currentMP.Object), logMachinePoolVersionChange(currentMP.Object, desiredMP.Object)) // Wait until MachinePool is updated in the cache. 
// Note: We have to do this because otherwise using a cached client in current state could @@ -1065,7 +1063,7 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, curr return currentMP.Object.GetResourceVersion() != cachedMP.GetResourceVersion(), nil }) if err != nil { - return errors.Wrapf(err, "failed waiting for MachinePool %s to be updated in the cache after patch", tlog.KObj{Obj: currentMP.Object}) + return errors.Wrapf(err, "failed waiting for MachinePool %s to be updated in the cache after patch", klog.KObj(currentMP.Object)) } // We want to call both cleanup functions even if one of them fails to clean up as much as possible. @@ -1085,12 +1083,15 @@ func logMachinePoolVersionChange(current, desired *expv1.MachinePool) string { // deleteMachinePool deletes a MachinePool. func (r *Reconciler) deleteMachinePool(ctx context.Context, cluster *clusterv1.Cluster, mp *scope.MachinePoolState) error { - log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object).WithObject(mp.Object) - log.Infof("Deleting %s", tlog.KObj{Obj: mp.Object}) + log := ctrl.LoggerFrom(ctx).WithValues( + "MachinePool", klog.KObj(mp.Object), + "machinePoolTopology", mp.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]) + + log.Info("Deleting MachinePool") if err := r.Client.Delete(ctx, mp.Object); err != nil && !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete %s", tlog.KObj{Obj: mp.Object}) + return errors.Wrapf(err, "failed to delete MachinePool %s", klog.KObj(mp.Object)) } - r.recorder.Eventf(cluster, corev1.EventTypeNormal, deleteEventReason, "Deleted %q", tlog.KObj{Obj: mp.Object}) + r.recorder.Eventf(cluster, corev1.EventTypeNormal, deleteEventReason, "Deleted MachinePool %q", klog.KObj(mp.Object)) return nil } @@ -1157,11 +1158,11 @@ type reconcileReferencedObjectInput struct { // NOTE: After a referenced object is created it is assumed that the reference should // never change (only the content of the object can eventually change). Thus, we are checking for strict compatibility. func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcileReferencedObjectInput) (bool, error) { - log := tlog.LoggerFrom(ctx) + log := ctrl.LoggerFrom(ctx) // If there is no current object, create it. if in.current == nil { - log.Infof("Creating %s", tlog.KObj{Obj: in.desired}) + log.Info(fmt.Sprintf("Creating %s", in.desired.GetKind()), in.desired.GetKind(), klog.KObj(in.desired)) helper, err := r.patchHelperFactory(ctx, nil, in.desired, structuredmerge.IgnorePaths(in.ignorePaths)) if err != nil { return false, errors.Wrap(createErrorWithoutObjectName(ctx, err, in.desired), "failed to create patch helper") @@ -1169,7 +1170,7 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile if err := helper.Patch(ctx); err != nil { return false, createErrorWithoutObjectName(ctx, err, in.desired) } - r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: in.desired}) + r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, createEventReason, "Created %s %q", in.desired.GetKind(), klog.KObj(in.desired)) return true, nil } @@ -1178,26 +1179,29 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile return false, allErrs.ToAggregate() } + log = log.WithValues(in.current.GetKind(), klog.KObj(in.current)) + ctx = ctrl.LoggerInto(ctx, log) + // Check differences between current and desired state, and eventually patch the current object. 
patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths)) if err != nil { - return false, errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current}) + return false, errors.Wrapf(err, "failed to create patch helper for %s %s", in.current.GetKind(), klog.KObj(in.current)) } if !patchHelper.HasChanges() { - log.V(3).Infof("No changes for %s", tlog.KObj{Obj: in.desired}) + log.V(3).Info(fmt.Sprintf("No changes for %s", in.desired.GetKind())) return false, nil } changes := patchHelper.Changes() if len(changes) == 0 { - log.Infof("Patching %s", tlog.KObj{Obj: in.desired}) + log.Info(fmt.Sprintf("Patching %s", in.desired.GetKind())) } else { - log.Infof("Patching %s, diff: %s", tlog.KObj{Obj: in.desired}, changes) + log.Info(fmt.Sprintf("Patching %s", in.desired.GetKind()), "diff", string(changes)) } if err := patchHelper.Patch(ctx); err != nil { - return false, errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: in.current}) + return false, errors.Wrapf(err, "failed to patch %s %s", in.current.GetKind(), klog.KObj(in.current)) } - r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, updateEventReason, "Updated %q%s", tlog.KObj{Obj: in.desired}, logUnstructuredVersionChange(in.current, in.desired, in.versionGetter)) + r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, updateEventReason, "Updated %s %q%s", in.desired.GetKind(), klog.KObj(in.desired), logUnstructuredVersionChange(in.current, in.desired, in.versionGetter)) return false, nil } @@ -1240,11 +1244,11 @@ type reconcileReferencedTemplateInput struct { // can be executed afterwards. // NOTE: This func has a side effect in case of template rotation, changing both the desired object and the object reference. func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconcileReferencedTemplateInput) (bool, error) { - log := tlog.LoggerFrom(ctx) + log := ctrl.LoggerFrom(ctx) // If there is no current object, create the desired object. if in.current == nil { - log.Infof("Creating %s", tlog.KObj{Obj: in.desired}) + log.Info(fmt.Sprintf("Creating %s", in.desired.GetKind()), in.desired.GetKind(), klog.KObj(in.desired)) helper, err := r.patchHelperFactory(ctx, nil, in.desired) if err != nil { return false, errors.Wrap(createErrorWithoutObjectName(ctx, err, in.desired), "failed to create patch helper") @@ -1252,12 +1256,12 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci if err := helper.Patch(ctx); err != nil { return false, createErrorWithoutObjectName(ctx, err, in.desired) } - r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: in.desired}) + r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, createEventReason, "Created %s %q", in.desired.GetKind(), klog.KObj(in.desired)) return true, nil } if in.ref == nil { - return false, errors.Errorf("failed to rotate %s: ref should not be nil", in.desired.GroupVersionKind()) + return false, errors.Errorf("failed to rotate %s: ref should not be nil", in.desired.GetKind()) } // Check if the current and desired referenced object are compatible. @@ -1265,15 +1269,18 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci return false, allErrs.ToAggregate() } + log = log.WithValues(in.current.GetKind(), klog.KObj(in.current)) + ctx = ctrl.LoggerInto(ctx, log) + // Check differences between current and desired objects, and if there are changes eventually start the template rotation. 
patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired) if err != nil { - return false, errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current}) + return false, errors.Wrapf(err, "failed to create patch helper for %s %s", in.current.GetKind(), klog.KObj(in.current)) } // Return if no changes are detected. if !patchHelper.HasChanges() { - log.V(3).Infof("No changes for %s", tlog.KObj{Obj: in.desired}) + log.V(3).Info(fmt.Sprintf("No changes for %s", in.desired.GetKind())) return false, nil } @@ -1282,14 +1289,14 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci if !patchHelper.HasSpecChanges() { changes := patchHelper.Changes() if len(changes) == 0 { - log.Infof("Patching %s", tlog.KObj{Obj: in.desired}) + log.Info(fmt.Sprintf("Patching %s", in.desired.GetKind())) } else { - log.Infof("Patching %s, diff: %s", tlog.KObj{Obj: in.desired}, changes) + log.Info(fmt.Sprintf("Patching %s", in.desired.GetKind()), "diff", string(changes)) } if err := patchHelper.Patch(ctx); err != nil { - return false, errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: in.desired}) + return false, errors.Wrapf(err, "failed to patch %s %s", in.desired.GetKind(), klog.KObj(in.desired)) } - r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, updateEventReason, "Updated %q (metadata changes)", tlog.KObj{Obj: in.desired}) + r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, updateEventReason, "Updated %s %q (metadata changes)", in.desired.GetKind(), klog.KObj(in.desired)) return false, nil } @@ -1302,11 +1309,11 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci changes := patchHelper.Changes() if len(changes) == 0 { - log.Infof("Rotating %s, new name %s", tlog.KObj{Obj: in.current}, newName) + log.Info(fmt.Sprintf("Rotating %s, new name %s", in.current.GetKind(), newName)) } else { - log.Infof("Rotating %s, new name %s, diff: %s", tlog.KObj{Obj: in.current}, newName, changes) + log.Info(fmt.Sprintf("Rotating %s, new name %s", in.current.GetKind(), newName), "diff", string(changes)) } - log.Infof("Creating %s", tlog.KObj{Obj: in.desired}) + log.Info(fmt.Sprintf("Creating %s", in.current.GetKind())) helper, err := r.patchHelperFactory(ctx, nil, in.desired) if err != nil { return false, errors.Wrap(createErrorWithoutObjectName(ctx, err, in.desired), "failed to create patch helper") @@ -1314,7 +1321,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci if err := helper.Patch(ctx); err != nil { return false, createErrorWithoutObjectName(ctx, err, in.desired) } - r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, createEventReason, "Created %q as a replacement for %q (template rotation)", tlog.KObj{Obj: in.desired}, in.ref.Name) + r.recorder.Eventf(in.cluster, corev1.EventTypeNormal, createEventReason, "Created %s %q as a replacement for %q (template rotation)", in.desired.GetKind(), klog.KObj(in.desired), in.ref.Name) // Update the reference with the new name. // NOTE: Updating the object hosting reference to the template is executed outside this func. 
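Note on the logging pattern used throughout the reconcile_state.go hunks above: the deleted tlog wrapper rendered objects as kind/name and re-injected itself into the context via Into(ctx); the replacement builds the same behaviour from controller-runtime and klog primitives. Because klog.KObj renders only namespace/name, the kind is now spelled out explicitly, either as the structured log key or inside the message. A minimal, self-contained sketch of that pattern, assuming a hypothetical helper and a ConfigMap purely for illustration (this is not code from the patch):

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

// reconcileChild enriches the context logger with the object being worked on, so that
// callees retrieving the logger via ctrl.LoggerFrom(ctx) inherit the key/value pairs.
func reconcileChild(ctx context.Context, cm *corev1.ConfigMap) {
	// klog.KObj(cm) renders as "namespace/name"; the kind is added explicitly as the key.
	log := ctrl.LoggerFrom(ctx).WithValues("ConfigMap", klog.KObj(cm))
	ctx = ctrl.LoggerInto(ctx, log)

	log.Info("Creating ConfigMap")
	helper(ctx)
}

func helper(ctx context.Context) {
	// The enriched logger travels with the context, so object details need not be repeated here.
	ctrl.LoggerFrom(ctx).V(3).Info("No changes for ConfigMap")
}

func main() {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "test"}}
	reconcileChild(context.Background(), cm)
}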
diff --git a/internal/controllers/topology/cluster/util.go b/internal/controllers/topology/cluster/util.go index 81a6963ae29b..3a2111713bea 100644 --- a/internal/controllers/topology/cluster/util.go +++ b/internal/controllers/topology/cluster/util.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/klog/v2" "sigs.k8s.io/cluster-api/controllers/external" ) @@ -34,7 +35,7 @@ func (r *Reconciler) getReference(ctx context.Context, ref *corev1.ObjectReferen obj, err := external.Get(ctx, r.Client, ref, ref.Namespace) if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve %s %q in namespace %q", ref.Kind, ref.Name, ref.Namespace) + return nil, errors.Wrapf(err, "failed to retrieve %s %s", ref.Kind, klog.KRef(ref.Namespace, ref.Name)) } return obj, nil } diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go index ad821dda1ab6..775ae885dbc8 100644 --- a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go @@ -33,7 +33,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/controllers/topology/machineset" - tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/labels" @@ -185,11 +184,11 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, md *clusterv1.MachineD // Delete unused templates. ref := md.Spec.Template.Spec.Bootstrap.ConfigRef if err := machineset.DeleteTemplateIfUnused(ctx, r.Client, templatesInUse, ref); err != nil { - return errors.Wrapf(err, "failed to delete bootstrap template for %s", tlog.KObj{Obj: md}) + return errors.Wrapf(err, "failed to delete %s %s for MachineDeployment %s", ref.Kind, klog.KRef(ref.Namespace, ref.Name), klog.KObj(md)) } ref = &md.Spec.Template.Spec.InfrastructureRef if err := machineset.DeleteTemplateIfUnused(ctx, r.Client, templatesInUse, ref); err != nil { - return errors.Wrapf(err, "failed to delete infrastructure template for %s", tlog.KObj{Obj: md}) + return errors.Wrapf(err, "failed to delete %s %s for MachineDeployment %s", ref.Kind, klog.KRef(ref.Namespace, ref.Name), klog.KObj(md)) } // If the MachineDeployment has a MachineHealthCheck delete it. 
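The same reasoning applies to object references, as in the getReference and reconcileDelete hunks above: klog.KRef formats a namespace/name pair without needing the full object, so the Kind is passed separately in the message. A short illustrative sketch with made-up reference values (not code from the patch):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

func main() {
	ref := &corev1.ObjectReference{Kind: "KubeadmConfigTemplate", Namespace: "default", Name: "md-bootstrap"}
	// klog.KRef prints "namespace/name", or just "name" when the namespace is empty.
	fmt.Printf("failed to delete %s %s\n", ref.Kind, klog.KRef(ref.Namespace, ref.Name))
}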
@@ -214,7 +213,7 @@ func (r *Reconciler) deleteMachineHealthCheckForMachineDeployment(ctx context.Co if apierrors.IsNotFound(err) { return nil } - return errors.Wrapf(err, "failed to delete %s", tlog.KObj{Obj: mhc}) + return errors.Wrapf(err, "failed to delete MachineHealthCheck %s", klog.KObj(mhc)) } return nil } diff --git a/internal/controllers/topology/machineset/machineset_controller.go b/internal/controllers/topology/machineset/machineset_controller.go index 6ce4daad152c..65584eafc7d9 100644 --- a/internal/controllers/topology/machineset/machineset_controller.go +++ b/internal/controllers/topology/machineset/machineset_controller.go @@ -34,7 +34,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/labels" @@ -210,11 +209,11 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, ms *clusterv1.MachineS // Delete unused templates. ref := ms.Spec.Template.Spec.Bootstrap.ConfigRef if err := DeleteTemplateIfUnused(ctx, r.Client, templatesInUse, ref); err != nil { - return errors.Wrapf(err, "failed to delete bootstrap template for %s", tlog.KObj{Obj: ms}) + return errors.Wrapf(err, "failed to delete %s %s for MachineSet %s", ref.Kind, klog.KRef(ref.Namespace, ref.Name), klog.KObj(ms)) } ref = &ms.Spec.Template.Spec.InfrastructureRef if err := DeleteTemplateIfUnused(ctx, r.Client, templatesInUse, ref); err != nil { - return errors.Wrapf(err, "failed to delete infrastructure template for %s", tlog.KObj{Obj: ms}) + return errors.Wrapf(err, "failed to delete %s %s for MachineSet %s", ref.Kind, klog.KRef(ref.Namespace, ref.Name), klog.KObj(ms)) } // Remove the finalizer so the MachineSet can be garbage collected by Kubernetes. @@ -231,8 +230,8 @@ func getMachineDeploymentName(ms *clusterv1.MachineSet) (*types.NamespacedName, } gv, err := schema.ParseGroupVersion(ref.APIVersion) if err != nil { - return nil, errors.Errorf("could not calculate MachineDeployment name for %s: invalid apiVersion %q: %v", - tlog.KObj{Obj: ms}, ref.APIVersion, err) + return nil, errors.Errorf("could not calculate MachineDeployment name for MachineSet %s: invalid apiVersion %q: %v", + klog.KObj(ms), ref.APIVersion, err) } if gv.Group == clusterv1.GroupVersion.Group { return &client.ObjectKey{Namespace: ms.Namespace, Name: ref.Name}, nil @@ -242,5 +241,5 @@ func getMachineDeploymentName(ms *clusterv1.MachineSet) (*types.NamespacedName, // Note: Once we set an owner reference to a MachineDeployment in a MachineSet it stays there // and is not deleted when the MachineDeployment is deleted. So we assume there's something wrong, // if we couldn't find a MachineDeployment owner reference. 
- return nil, errors.Errorf("could not calculate MachineDeployment name for %s", tlog.KObj{Obj: ms}) + return nil, errors.Errorf("could not calculate MachineDeployment name for MachineSet %s", klog.KObj(ms)) } diff --git a/internal/controllers/topology/machineset/util.go b/internal/controllers/topology/machineset/util.go index bc3cb761344b..578ecba79676 100644 --- a/internal/controllers/topology/machineset/util.go +++ b/internal/controllers/topology/machineset/util.go @@ -25,11 +25,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" - tlog "sigs.k8s.io/cluster-api/internal/log" ) // GetMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. @@ -62,7 +63,7 @@ func CalculateTemplatesInUse(md *clusterv1.MachineDeployment, msList []*clusterv bootstrapRef := ms.Spec.Template.Spec.Bootstrap.ConfigRef infrastructureRef := &ms.Spec.Template.Spec.InfrastructureRef if err := addTemplateRef(templatesInUse, bootstrapRef, infrastructureRef); err != nil { - return nil, errors.Wrapf(err, "failed to add templates of %s to templatesInUse", tlog.KObj{Obj: ms}) + return nil, errors.Wrapf(err, "failed to add templates of MachineSet %s to templatesInUse", klog.KObj(ms)) } } @@ -76,7 +77,7 @@ func CalculateTemplatesInUse(md *clusterv1.MachineDeployment, msList []*clusterv bootstrapRef := md.Spec.Template.Spec.Bootstrap.ConfigRef infrastructureRef := &md.Spec.Template.Spec.InfrastructureRef if err := addTemplateRef(templatesInUse, bootstrapRef, infrastructureRef); err != nil { - return nil, errors.Wrapf(err, "failed to add templates of %s to templatesInUse", tlog.KObj{Obj: md}) + return nil, errors.Wrapf(err, "failed to add templates of MachineDeployment %s to templatesInUse", klog.KObj(md)) } return templatesInUse, nil } @@ -88,7 +89,7 @@ func DeleteTemplateIfUnused(ctx context.Context, c client.Client, templatesInUse return nil } - log := tlog.LoggerFrom(ctx).WithRef(ref) + log := ctrl.LoggerFrom(ctx).WithValues(ref.Kind, klog.KRef(ref.Namespace, ref.Name)) refID, err := templateRefID(ref) if err != nil { @@ -97,13 +98,13 @@ func DeleteTemplateIfUnused(ctx context.Context, c client.Client, templatesInUse // If the template is still in use, do nothing. 
if templatesInUse[refID] { - log.V(3).Infof("Not deleting %s, because it's still in use", tlog.KRef{Ref: ref}) + log.V(3).Info(fmt.Sprintf("Not deleting %s, because it's still in use", ref.Kind)) return nil } - log.Infof("Deleting %s", tlog.KRef{Ref: ref}) + log.Info(fmt.Sprintf("Deleting %s", ref.Kind)) if err := external.Delete(ctx, c, ref); err != nil && !apierrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to delete %s", tlog.KRef{Ref: ref}) + return errors.Wrapf(err, "failed to delete %s %s", ref.Kind, klog.KRef(ref.Namespace, ref.Name)) } return nil } diff --git a/internal/hooks/tracking.go b/internal/hooks/tracking.go index d8d91b1b5b2b..99a885940adf 100644 --- a/internal/hooks/tracking.go +++ b/internal/hooks/tracking.go @@ -23,11 +23,12 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" - tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/util/patch" ) @@ -115,9 +116,14 @@ func IsOkToDelete(obj client.Object) bool { // MarkAsOkToDelete adds the OkToDeleteAnnotation annotation to the object and patches it. func MarkAsOkToDelete(ctx context.Context, c client.Client, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, c.Scheme()) + if err != nil { + return errors.Wrapf(err, "failed to mark %s as ok to delete: failed to get GVK for object", klog.KObj(obj)) + } + patchHelper, err := patch.NewHelper(obj, c) if err != nil { - return errors.Wrapf(err, "failed to mark %s as ok to delete", tlog.KObj{Obj: obj}) + return errors.Wrapf(err, "failed to mark %s %s as ok to delete", gvk.Kind, klog.KObj(obj)) } annotations := obj.GetAnnotations() @@ -128,7 +134,7 @@ func MarkAsOkToDelete(ctx context.Context, c client.Client, obj client.Object) e obj.SetAnnotations(annotations) if err := patchHelper.Patch(ctx, obj); err != nil { - return errors.Wrapf(err, "failed to mark %s as ok to delete", tlog.KObj{Obj: obj}) + return errors.Wrapf(err, "failed to mark %s %s as ok to delete", gvk.Kind, klog.KObj(obj)) } return nil diff --git a/internal/log/doc.go b/internal/log/doc.go deleted file mode 100644 index 2a6ae4b55fed..000000000000 --- a/internal/log/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package log provides log utilities for the topology package. -package log diff --git a/internal/log/log.go b/internal/log/log.go deleted file mode 100644 index ca349e6e4b36..000000000000 --- a/internal/log/log.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package log - -import ( - "context" - "fmt" - - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" -) - -// LoggerFrom returns a logger with predefined values from a context.Context. -// The logger, when used with controllers, can be expected to contain basic information about the object -// that's being reconciled like: -// - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. -// - `name` and `namespace` injected from the reconciliation request. -// -// This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. -func LoggerFrom(ctx context.Context) Logger { - log := ctrl.LoggerFrom(ctx) - return &topologyReconcileLogger{ - // We use call depth 1 so the logger prints the log line of the caller of the log func (e.g. Infof) - // not of the log func. - // NOTE: We do this once here, so that we don't have to do this on every log call. - Logger: log.WithCallDepth(1), - } -} - -// Logger provides a wrapper to log.Logger to be used for topology reconciler. -type Logger interface { - // WithObject adds to the logger information about the object being modified by reconcile, which in most case it is - // a resources being part of the Cluster by reconciled. - WithObject(obj client.Object) Logger - - // WithRef adds to the logger information about the object ref being modified by reconcile, which in most case it is - // a resources being part of the Cluster by reconciled. - WithRef(ref *corev1.ObjectReference) Logger - - // WithMachineDeployment adds to the logger information about the MachineDeployment object being processed. - WithMachineDeployment(md *clusterv1.MachineDeployment) Logger - - // WithMachinePool adds to the logger information about the MachinePool object being processed. - WithMachinePool(mp *expv1.MachinePool) Logger - - // WithValues adds key-value pairs of context to a logger. - WithValues(keysAndValues ...interface{}) Logger - - // V returns a logger value for a specific verbosity level, relative to - // this logger. - V(level int) Logger - - // Infof logs to the INFO log. - // Arguments are handled in the manner of fmt.Printf. - Infof(msg string, a ...interface{}) - - // Into takes a context and sets the logger as one of its keys. - // - // This is meant to be used in reconcilers to enrich the logger within a context with additional values. - Into(ctx context.Context) (context.Context, Logger) -} - -// topologyReconcileLogger implements Logger. -type topologyReconcileLogger struct { - logr.Logger -} - -// WithObject adds to the logger information about the object being modified by reconcile, which in most case it is -// a resources being part of the Cluster by reconciled. 
-func (l *topologyReconcileLogger) WithObject(obj client.Object) Logger { - return &topologyReconcileLogger{ - Logger: l.Logger.WithValues( - "resource", metav1.GroupVersionResource{ - Version: obj.GetObjectKind().GroupVersionKind().Version, - Group: obj.GetObjectKind().GroupVersionKind().GroupKind().Group, - Resource: obj.GetObjectKind().GroupVersionKind().Kind, - }, - obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj), - ), - } -} - -// WithRef adds to the logger information about the object ref being modified by reconcile, which in most case it is -// a resources being part of the Cluster by reconciled. -func (l *topologyReconcileLogger) WithRef(ref *corev1.ObjectReference) Logger { - return &topologyReconcileLogger{ - Logger: l.Logger.WithValues( - "resource", metav1.GroupVersionResource{ - Version: ref.GetObjectKind().GroupVersionKind().Version, - Group: ref.GetObjectKind().GroupVersionKind().GroupKind().Group, - Resource: ref.GetObjectKind().GroupVersionKind().Kind, - }, - ref.GetObjectKind().GroupVersionKind().Kind, klog.KRef(ref.Namespace, ref.Name), - ), - } -} - -// WithMachineDeployment adds to the logger information about the MachineDeployment object being processed. -func (l *topologyReconcileLogger) WithMachineDeployment(md *clusterv1.MachineDeployment) Logger { - topologyName := md.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] - return &topologyReconcileLogger{ - Logger: l.Logger.WithValues( - "MachineDeployment", klog.KObj(md), - "machineDeploymentTopology", topologyName, - ), - } -} - -// WithMachinePool adds to the logger information about the MachinePool object being processed. -func (l *topologyReconcileLogger) WithMachinePool(mp *expv1.MachinePool) Logger { - topologyName := mp.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] - return &topologyReconcileLogger{ - Logger: l.Logger.WithValues( - "MachinePool", klog.KObj(mp), - "machinePoolTopology", topologyName, - ), - } -} - -// WithValues adds key-value pairs of context to a logger. -func (l *topologyReconcileLogger) WithValues(keysAndValues ...interface{}) Logger { - l.Logger = l.Logger.WithValues(keysAndValues...) - return l -} - -// V returns a logger value for a specific verbosity level, relative to -// this logger. -func (l *topologyReconcileLogger) V(level int) Logger { - return &topologyReconcileLogger{ - Logger: l.Logger.V(level), - } -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf. -func (l *topologyReconcileLogger) Infof(msg string, a ...interface{}) { - l.Logger.Info(fmt.Sprintf(msg, a...)) -} - -// Into takes a context and sets the logger as one of its keys. -// -// This is meant to be used in reconcilers to enrich the logger within a context with additional values. -func (l *topologyReconcileLogger) Into(ctx context.Context) (context.Context, Logger) { - return ctrl.LoggerInto(ctx, l.Logger), l -} - -// KObj return a reference to a Kubernetes object in the same format used by kubectl commands (kind/name). -// Note: We're intentionally not using klog.KObj as we want the kind/name format instead of namespace/name. -type KObj struct { - Obj client.Object -} - -func (ref KObj) String() string { - if ref.Obj == nil { - return "" - } - return fmt.Sprintf("%s/%s", ref.Obj.GetObjectKind().GroupVersionKind().Kind, ref.Obj.GetName()) -} - -// KRef return a reference to a Kubernetes object in the same format used by kubectl commands (kind/name). 
-type KRef struct { - Ref *corev1.ObjectReference -} - -func (ref KRef) String() string { - if ref.Ref == nil { - return "" - } - return fmt.Sprintf("%s/%s", ref.Ref.Kind, ref.Ref.Name) -} diff --git a/internal/runtime/client/client.go b/internal/runtime/client/client.go index 78ea98c63d07..dbfdb6687a3c 100644 --- a/internal/runtime/client/client.go +++ b/internal/runtime/client/client.go @@ -346,26 +346,26 @@ func (c *client) CallExtension(ctx context.Context, hook runtimecatalog.Hook, fo ignore := *registration.FailurePolicy == runtimev1.FailurePolicyIgnore if _, ok := err.(errCallingExtensionHandler); ok && ignore { // Update the response to a default success response and return. - log.Error(err, fmt.Sprintf("ignoring error calling extension handler because of FailurePolicy %q", *registration.FailurePolicy)) + log.Error(err, fmt.Sprintf("Ignoring error calling extension handler because of FailurePolicy %q", *registration.FailurePolicy)) response.SetStatus(runtimehooksv1.ResponseStatusSuccess) response.SetMessage("") return nil } - log.Error(err, "failed to call extension handler") + log.Error(err, "Failed to call extension handler") return errors.Wrapf(err, "failed to call extension handler %q", name) } // If the received response is a failure then return an error. if response.GetStatus() == runtimehooksv1.ResponseStatusFailure { - log.Info(fmt.Sprintf("failed to call extension handler %q: got failure response with message %v", name, response.GetMessage())) + log.Info(fmt.Sprintf("Failed to call extension handler %q: got failure response with message %v", name, response.GetMessage())) // Don't add the message to the error as it is may be unique causing too many reconciliations. Ref: https://github.com/kubernetes-sigs/cluster-api/issues/6921 return errors.Errorf("failed to call extension handler %q: got failure response", name) } if retryResponse, ok := response.(runtimehooksv1.RetryResponseObject); ok && retryResponse.GetRetryAfterSeconds() != 0 { - log.Info(fmt.Sprintf("extension handler returned blocking response with retryAfterSeconds of %d", retryResponse.GetRetryAfterSeconds())) + log.Info(fmt.Sprintf("Extension handler returned blocking response with retryAfterSeconds of %d", retryResponse.GetRetryAfterSeconds())) } else { - log.Info("extension handler returned success response") + log.Info("Extension handler returned success response") } // Received a successful response from the extension handler. The `response` object