Add new ClusterCache implementation
sbueringer committed Oct 14, 2024
1 parent 7e2e9c4 commit 19c80d4
Showing 65 changed files with 3,900 additions and 712 deletions.
6 changes: 6 additions & 0 deletions api/v1beta1/v1beta2_condition_consts.go
@@ -203,6 +203,12 @@ const (
// is detected (or whatever period is defined in the --remote-connection-grace-period flag).
ClusterRemoteConnectionProbeV1Beta2Condition = "RemoteConnectionProbe"

// ClusterRemoteConnectionProbeFailedV1Beta2Reason surfaces issues with the connection to the workload cluster.
ClusterRemoteConnectionProbeFailedV1Beta2Reason = "RemoteConnectionProbeFailed"

// ClusterRemoteConnectionProbeSucceededV1Beta2Reason is used to report a working connection with the workload cluster.
ClusterRemoteConnectionProbeSucceededV1Beta2Reason = "RemoteConnectionProbeSucceeded"

// ClusterScalingUpV1Beta2Condition is true if available replicas < desired replicas.
ClusterScalingUpV1Beta2Condition = ScalingUpV1Beta2Condition

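The two new reasons pair with the existing ClusterRemoteConnectionProbeV1Beta2Condition type shown above. A minimal sketch of that pairing, assuming the probe result is reported as a plain metav1.Condition (the helper Cluster API uses internally to store the condition may differ):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// remoteConnectionProbeCondition maps a probe result to the condition type and
// the reason constants added in this hunk.
func remoteConnectionProbeCondition(healthy bool, message string) metav1.Condition {
	if healthy {
		return metav1.Condition{
			Type:   clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition,
			Status: metav1.ConditionTrue,
			Reason: clusterv1.ClusterRemoteConnectionProbeSucceededV1Beta2Reason,
		}
	}
	return metav1.Condition{
		Type:    clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition,
		Status:  metav1.ConditionFalse,
		Reason:  clusterv1.ClusterRemoteConnectionProbeFailedV1Beta2Reason,
		Message: message,
	}
}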
6 changes: 3 additions & 3 deletions bootstrap/kubeadm/controllers/alias.go
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller"

kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/controllers"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// Following types provides access to reconcilers implemented in internal/controllers, thus
@@ -41,7 +41,7 @@ type KubeadmConfigReconciler struct {
Client client.Client
SecretCachingClient client.Client

Tracker *remote.ClusterCacheTracker
ClusterCache clustercache.ClusterCache

// WatchFilterValue is the label value used to filter events prior to reconciliation.
WatchFilterValue string
@@ -55,7 +55,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
return (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{
Client: r.Client,
SecretCachingClient: r.SecretCachingClient,
Tracker: r.Tracker,
ClusterCache: r.ClusterCache,
WatchFilterValue: r.WatchFilterValue,
TokenTTL: r.TokenTTL,
}).SetupWithManager(ctx, mgr, options)
18 changes: 8 additions & 10 deletions bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
@@ -50,7 +50,7 @@ import (
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/locking"
kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types"
bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/controllers/clustercache"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/util/taints"
@@ -83,7 +83,7 @@ type InitLocker interface {
type KubeadmConfigReconciler struct {
Client client.Client
SecretCachingClient client.Client
Tracker *remote.ClusterCacheTracker
ClusterCache clustercache.ClusterCache
KubeadmInitLock InitLocker

// WatchFilterValue is the label value used to filter events prior to reconciliation.
@@ -135,7 +135,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue),
),
),
)
).WatchesRawSource(r.ClusterCache.GetClusterSource("kubeadmconfig", r.ClusterToKubeadmConfigs))

if err := b.Complete(r); err != nil {
return errors.Wrap(err, "failed setting up with a controller manager")
@@ -242,10 +242,8 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
}

res, err := r.reconcile(ctx, scope, cluster, config, configOwner)
if err != nil && errors.Is(err, remote.ErrClusterLocked) {
// Requeue if the reconcile failed because the ClusterCacheTracker was locked for
// the current cluster because of concurrent access.
log.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker")
if err != nil && errors.Is(err, clustercache.ErrClusterNotConnected) {
log.V(5).Info("Requeuing because connection to the workload cluster is down")
return ctrl.Result{RequeueAfter: time.Minute}, nil
}
return res, err
@@ -320,7 +318,7 @@ func (r *KubeadmConfigReconciler) refreshBootstrapTokenIfNeeded(ctx context.Cont
log := ctrl.LoggerFrom(ctx)
token := config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token

remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
return ctrl.Result{}, err
}
@@ -367,7 +365,7 @@ func (r *KubeadmConfigReconciler) refreshBootstrapTokenIfNeeded(ctx context.Cont
func (r *KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Context, config *bootstrapv1.KubeadmConfig, cluster *clusterv1.Cluster, scope *Scope) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)
log.V(2).Info("Config is owned by a MachinePool, checking if token should be rotated")
remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
return ctrl.Result{}, err
}
@@ -1087,7 +1085,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste

// if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join
if config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token == "" {
remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
return ctrl.Result{}, err
}
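The hunks above replace Tracker.GetClient and remote.ErrClusterLocked with ClusterCache.GetClient and clustercache.ErrClusterNotConnected. A condensed sketch of the resulting consumption pattern; the function name, the Node list, and the one-minute requeue interval are illustrative assumptions, not part of this commit:

package example

import (
	"context"
	"errors"
	"time"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// reconcileWorkload asks the ClusterCache for a client to the workload cluster
// and requeues while the connection is down instead of surfacing an error.
func reconcileWorkload(ctx context.Context, cc clustercache.ClusterCache, cluster *clusterv1.Cluster) (ctrl.Result, error) {
	remoteClient, err := cc.GetClient(ctx, client.ObjectKeyFromObject(cluster))
	if err != nil {
		if errors.Is(err, clustercache.ErrClusterNotConnected) {
			// The connection to the workload cluster is currently down; retry later.
			return ctrl.Result{RequeueAfter: time.Minute}, nil
		}
		return ctrl.Result{}, err
	}

	// Use the remote client against the workload cluster, e.g. list Nodes.
	nodes := &corev1.NodeList{}
	if err := remoteClient.List(ctx, nodes); err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}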
bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
@@ -24,7 +24,6 @@ import (
"time"

ignition "github.com/flatcar/ignition/config/v2_3"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -34,14 +33,13 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
bootstrapbuilder "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/builder"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/controllers/clustercache"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/test/builder"
@@ -509,7 +507,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T)
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -571,7 +569,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -693,7 +691,7 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -770,7 +768,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) {
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -871,7 +869,7 @@ func TestBootstrapDataFormat(t *testing.T) {
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}
request := ctrl.Request{
@@ -952,7 +950,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) {
k := &KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}
request := ctrl.Request{
@@ -1033,7 +1031,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) {
SecretCachingClient: myclient,
KubeadmInitLock: &myInitLocker{},
TokenTTL: DefaultTokenTTL,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, remoteClient, remoteClient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(remoteClient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
}
request := ctrl.Request{
NamespacedName: client.ObjectKey{
@@ -1279,7 +1277,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) {
SecretCachingClient: myclient,
KubeadmInitLock: &myInitLocker{},
TokenTTL: DefaultTokenTTL,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, remoteClient, remoteClient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(remoteClient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
}
request := ctrl.Request{
NamespacedName: client.ObjectKey{
@@ -1602,7 +1600,7 @@ func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testin
k := &KubeadmConfigReconciler{
Client: fakeClient,
SecretCachingClient: fakeClient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: tc.cluster.Name, Namespace: tc.cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(fakeClient, client.ObjectKey{Name: tc.cluster.Name, Namespace: tc.cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

@@ -1827,7 +1825,7 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques
reconciler := KubeadmConfigReconciler{
Client: myclient,
SecretCachingClient: myclient,
Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), myclient, myclient, myclient.Scheme(), client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
ClusterCache: clustercache.NewFakeClusterCache(myclient, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}),
KubeadmInitLock: &myInitLocker{},
}

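Throughout the tests, remote.NewTestClusterCacheTracker (which needed a logger, two clients, and a scheme) is replaced by clustercache.NewFakeClusterCache, which only takes the client to serve and the cluster key it serves it for. A small sketch of the wiring, using an assumed helper name together with the reconciler and locker types from this test file:

// newTestReconciler is an assumed helper, not part of this commit.
func newTestReconciler(c client.Client, cluster *clusterv1.Cluster) *KubeadmConfigReconciler {
	return &KubeadmConfigReconciler{
		Client:              c,
		SecretCachingClient: c,
		// GetClient on the fake returns c for the given cluster key.
		ClusterCache:    clustercache.NewFakeClusterCache(c, client.ObjectKeyFromObject(cluster)),
		KubeadmInitLock: &myInitLocker{},
	}
}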
98 changes: 48 additions & 50 deletions bootstrap/kubeadm/main.go
@@ -47,6 +47,7 @@ import (
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/controllers"
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/webhooks"
"sigs.k8s.io/cluster-api/controllers/clustercache"
"sigs.k8s.io/cluster-api/controllers/remote"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
@@ -63,31 +64,31 @@
controllerName = "cluster-api-kubeadm-bootstrap-manager"

// flags.
enableLeaderElection bool
leaderElectionLeaseDuration time.Duration
leaderElectionRenewDeadline time.Duration
leaderElectionRetryPeriod time.Duration
watchFilterValue string
watchNamespace string
profilerAddress string
enableContentionProfiling bool
syncPeriod time.Duration
restConfigQPS float32
restConfigBurst int
clusterCacheTrackerClientQPS float32
clusterCacheTrackerClientBurst int
webhookPort int
webhookCertDir string
webhookCertName string
webhookKeyName string
healthAddr string
managerOptions = flags.ManagerOptions{}
logOptions = logs.NewOptions()
enableLeaderElection bool
leaderElectionLeaseDuration time.Duration
leaderElectionRenewDeadline time.Duration
leaderElectionRetryPeriod time.Duration
watchFilterValue string
watchNamespace string
profilerAddress string
enableContentionProfiling bool
syncPeriod time.Duration
restConfigQPS float32
restConfigBurst int
clusterCacheClientQPS float32
clusterCacheClientBurst int
webhookPort int
webhookCertDir string
webhookCertName string
webhookKeyName string
healthAddr string
managerOptions = flags.ManagerOptions{}
logOptions = logs.NewOptions()
// CABPK specific flags.
clusterConcurrency int
clusterCacheTrackerConcurrency int
kubeadmConfigConcurrency int
tokenTTL time.Duration
clusterConcurrency int
clusterCacheConcurrency int
kubeadmConfigConcurrency int
tokenTTL time.Duration
)

func init() {
@@ -131,7 +132,7 @@ func InitFlags(fs *pflag.FlagSet) {
"Number of clusters to process simultaneously")
_ = fs.MarkDeprecated("cluster-concurrency", "This flag has no function anymore and is going to be removed in a next release. Use \"--clustercachetracker-concurrency\" instead.")

fs.IntVar(&clusterCacheTrackerConcurrency, "clustercachetracker-concurrency", 10,
fs.IntVar(&clusterCacheConcurrency, "clustercache-concurrency", 100,
"Number of clusters to process simultaneously")

fs.IntVar(&kubeadmConfigConcurrency, "kubeadmconfig-concurrency", 10,
@@ -146,11 +147,11 @@ func InitFlags(fs *pflag.FlagSet) {
fs.IntVar(&restConfigBurst, "kube-api-burst", 30,
"Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server.")

fs.Float32Var(&clusterCacheTrackerClientQPS, "clustercachetracker-client-qps", 20,
"Maximum queries per second from the cluster cache tracker clients to the Kubernetes API server of workload clusters.")
fs.Float32Var(&clusterCacheClientQPS, "clustercache-client-qps", 20,
"Maximum queries per second from the cluster cache clients to the Kubernetes API server of workload clusters.")

fs.IntVar(&clusterCacheTrackerClientBurst, "clustercachetracker-client-burst", 30,
"Maximum number of queries that should be allowed in one burst from the cluster cache tracker clients to the Kubernetes API server of workload clusters.")
fs.IntVar(&clusterCacheClientBurst, "clustercache-client-burst", 30,
"Maximum number of queries that should be allowed in one burst from the cluster cache clients to the Kubernetes API server of workload clusters.")

fs.DurationVar(&tokenTTL, "bootstrap-token-ttl", kubeadmbootstrapcontrollers.DefaultTokenTTL,
"The amount of time the bootstrap token will be valid")
@@ -312,35 +313,32 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
os.Exit(1)
}

// Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers
// requiring a connection to a remote cluster
tracker, err := remote.NewClusterCacheTracker(
mgr,
remote.ClusterCacheTrackerOptions{
SecretCachingClient: secretCachingClient,
ControllerName: controllerName,
Log: &ctrl.Log,
ClientQPS: clusterCacheTrackerClientQPS,
ClientBurst: clusterCacheTrackerClientBurst,
clusterCache, err := clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
SecretClient: secretCachingClient,
Cache: clustercache.CacheOptions{},
Client: clustercache.ClientOptions{
QPS: clusterCacheClientQPS,
Burst: clusterCacheClientBurst,
UserAgent: remote.DefaultClusterAPIUserAgent(controllerName),
Cache: clustercache.ClientCacheOptions{
DisableFor: []client.Object{
// Don't cache ConfigMaps & Secrets.
&corev1.ConfigMap{},
&corev1.Secret{},
},
},
},
)
if err != nil {
setupLog.Error(err, "unable to create cluster cache tracker")
os.Exit(1)
}
if err := (&remote.ClusterCacheReconciler{
Client: mgr.GetClient(),
Tracker: tracker,
WatchFilterValue: watchFilterValue,
}).SetupWithManager(ctx, mgr, concurrency(clusterCacheTrackerConcurrency)); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ClusterCacheReconciler")
}, concurrency(clusterCacheConcurrency))
if err != nil {
setupLog.Error(err, "Unable to create ClusterCache")
os.Exit(1)
}

if err := (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{
Client: mgr.GetClient(),
SecretCachingClient: secretCachingClient,
Tracker: tracker,
ClusterCache: clusterCache,
WatchFilterValue: watchFilterValue,
TokenTTL: tokenTTL,
}).SetupWithManager(ctx, mgr, concurrency(kubeadmConfigConcurrency)); err != nil {
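Taken together with the controller changes above: main.go now creates a single ClusterCache via clustercache.SetupWithManager and passes it to the reconcilers, which use it both for workload-cluster clients and, via GetClusterSource, as a watch source. A condensed sketch of that wiring; the helper name, hard-coded concurrency values, and omitted options are assumptions, not part of this commit:

package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"

	kubeadmbootstrapcontrollers "sigs.k8s.io/cluster-api/bootstrap/kubeadm/controllers"
	"sigs.k8s.io/cluster-api/controllers/clustercache"
	"sigs.k8s.io/cluster-api/controllers/remote"
)

// setupKubeadmConfigController is an assumed helper that condenses the main.go
// and SetupWithManager hunks above into one place.
func setupKubeadmConfigController(ctx context.Context, mgr ctrl.Manager, secretCachingClient client.Client) error {
	// Create the ClusterCache once; it owns connections, caches and health
	// probes for all workload clusters and replaces the former
	// ClusterCacheTracker + ClusterCacheReconciler pair.
	clusterCache, err := clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
		SecretClient: secretCachingClient,
		Client: clustercache.ClientOptions{
			UserAgent: remote.DefaultClusterAPIUserAgent("cluster-api-kubeadm-bootstrap-manager"),
		},
	}, controller.Options{MaxConcurrentReconciles: 100})
	if err != nil {
		return err
	}

	// The reconciler keeps the ClusterCache for workload-cluster clients and,
	// via GetClusterSource, gets reconcile requests whenever the connection to
	// a workload cluster is established or lost.
	return (&kubeadmbootstrapcontrollers.KubeadmConfigReconciler{
		Client:              mgr.GetClient(),
		SecretCachingClient: secretCachingClient,
		ClusterCache:        clusterCache,
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 10})
}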