diff --git a/apis/v1alpha1/internalmembercluster_types.go b/apis/v1alpha1/internalmembercluster_types.go index 1b6e4ecc3..2bb624331 100644 --- a/apis/v1alpha1/internalmembercluster_types.go +++ b/apis/v1alpha1/internalmembercluster_types.go @@ -100,28 +100,17 @@ func (m *InternalMemberCluster) GetConditionWithType(agentType AgentType, condit // GetAgentStatus is used to retrieve agent status from internal member cluster, // if it doesn't exist it creates the expected agent status and returns it. func (m *InternalMemberCluster) GetAgentStatus(agentType AgentType) *AgentStatus { - // TODO: Refactor method - var desiredAgentStatus AgentStatus - for _, agentStatus := range m.Status.AgentStatus { - if agentStatus.Type == agentType { - desiredAgentStatus = agentStatus - } - } - - if desiredAgentStatus.Type == "" { - desiredAgentStatus = AgentStatus{ - Type: MemberAgent, - Conditions: []metav1.Condition{}, - } - m.Status.AgentStatus = append(m.Status.AgentStatus, desiredAgentStatus) - } - for i := range m.Status.AgentStatus { if m.Status.AgentStatus[i].Type == agentType { return &m.Status.AgentStatus[i] } } - return nil + agentStatus := AgentStatus{ + Type: agentType, + Conditions: []metav1.Condition{}, + } + m.Status.AgentStatus = append(m.Status.AgentStatus, agentStatus) + return &m.Status.AgentStatus[len(m.Status.AgentStatus)-1] } func init() { diff --git a/docker/hub-agent.Dockerfile b/docker/hub-agent.Dockerfile index f441d6467..31644571c 100644 --- a/docker/hub-agent.Dockerfile +++ b/docker/hub-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the hubagent binary -FROM golang:1.17 as builder +FROM golang:1.18 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/docker/member-agent.Dockerfile b/docker/member-agent.Dockerfile index fc047d6bf..f882be8c0 100644 --- a/docker/member-agent.Dockerfile +++ b/docker/member-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the memberagent binary -FROM golang:1.17 as builder +FROM golang:1.18 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/docker/refresh-token.Dockerfile b/docker/refresh-token.Dockerfile index f1065cc3d..9d2c63a51 100644 --- a/docker/refresh-token.Dockerfile +++ b/docker/refresh-token.Dockerfile @@ -1,5 +1,5 @@ # Build the hubagent binary -FROM golang:1.17 as builder +FROM golang:1.18 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/pkg/controllers/membercluster/membercluster_controller.go b/pkg/controllers/membercluster/membercluster_controller.go index 545ec3cd8..c5a3664f2 100644 --- a/pkg/controllers/membercluster/membercluster_controller.go +++ b/pkg/controllers/membercluster/membercluster_controller.go @@ -48,9 +48,11 @@ const ( // Reconciler reconciles a MemberCluster object type Reconciler struct { client.Client - recorder record.EventRecorder - NetworkingAgentsEnabled bool // if networking agents are enabled, need to handle unjoin before leave - numberOfAgents int + recorder record.EventRecorder + // Need to update the MC status based on the IMC conditions reported by each agent in the agent list. + NetworkingAgentsEnabled bool + // agents is used as a hash set to look up the expected agent types; the map values are ignored.
+ agents map[fleetv1alpha1.AgentType]string } func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -362,19 +364,32 @@ func (r *Reconciler) updateMemberClusterStatus(ctx context.Context, mc *fleetv1a // aggregateJoinedCondition is used to calculate and mark the joined or left status for member cluster based on join conditions from all agents. func (r *Reconciler) aggregateJoinedCondition(mc *fleetv1alpha1.MemberCluster) { klog.V(5).InfoS("syncJoinedCondition", "memberCluster", klog.KObj(mc)) - // TODO: Fix condition - if len(mc.Status.AgentStatus) < r.numberOfAgents { + if len(mc.Status.AgentStatus) < len(r.agents) { + markMemberClusterUnknown(r.recorder, mc) return } joined := true left := true + reportedAgents := make(map[fleetv1alpha1.AgentType]bool) for _, agentStatus := range mc.Status.AgentStatus { - conditions := agentStatus.Conditions - condition := meta.FindStatusCondition(conditions, string(fleetv1alpha1.AgentJoined)) - if condition != nil { - joined = joined && condition.Status == metav1.ConditionTrue - left = left && condition.Status == metav1.ConditionFalse + if _, found := r.agents[agentStatus.Type]; !found { + klog.V(2).InfoS("Ignoring unexpected agent type status", "agentStatus", agentStatus) + continue // ignore any unexpected agent type + } + condition := meta.FindStatusCondition(agentStatus.Conditions, string(fleetv1alpha1.AgentJoined)) + if condition == nil { + markMemberClusterUnknown(r.recorder, mc) + return } + + joined = joined && condition.Status == metav1.ConditionTrue + left = left && condition.Status == metav1.ConditionFalse + reportedAgents[agentStatus.Type] = true + } + + if len(reportedAgents) < len(r.agents) { + markMemberClusterUnknown(r.recorder, mc) + return } if joined && !left { @@ -471,10 +486,12 @@ func markMemberClusterUnknown(recorder record.EventRecorder, mc apis.Conditioned // SetupWithManager sets up the controller with the Manager. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { r.recorder = mgr.GetEventRecorderFor("memberCluster") + r.agents = make(map[fleetv1alpha1.AgentType]string) + r.agents[fleetv1alpha1.MemberAgent] = "" + if r.NetworkingAgentsEnabled { - r.numberOfAgents = 3 - } else { - r.numberOfAgents = 1 + r.agents[fleetv1alpha1.MultiClusterServiceAgent] = "" + r.agents[fleetv1alpha1.ServiceExportImportAgent] = "" } return ctrl.NewControllerManagedBy(mgr). For(&fleetv1alpha1.MemberCluster{}). diff --git a/pkg/controllers/membercluster/membercluster_controller_integration_test.go b/pkg/controllers/membercluster/membercluster_controller_integration_test.go index 69af17200..0e8a54a46 100644 --- a/pkg/controllers/membercluster/membercluster_controller_integration_test.go +++ b/pkg/controllers/membercluster/membercluster_controller_integration_test.go @@ -9,10 +9,13 @@ import ( "fmt" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -33,148 +36,547 @@ var _ = Describe("Test MemberCluster Controller", func() { namespaceName string memberClusterNamespacedName types.NamespacedName r *Reconciler + ignoreOption = cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime") ) - BeforeEach(func() { - ctx = context.Background() - memberClusterName = utils.RandStr() - namespaceName = fmt.Sprintf(utils.NamespaceNameFormat, memberClusterName) - memberClusterNamespacedName = types.NamespacedName{ - Name: memberClusterName, - } - - By("create the member cluster reconciler") - r = &Reconciler{ - Client: k8sClient, - } - err := r.SetupWithManager(mgr) - Expect(err).ToNot(HaveOccurred()) - - By("create member cluster for join") - mc := &fleetv1alpha1.MemberCluster{ - TypeMeta: metav1.TypeMeta{ - Kind: "MemberCluster", - APIVersion: fleetv1alpha1.GroupVersion.Version, - }, - ObjectMeta: metav1.ObjectMeta{ + Context("Test membercluster controller without networking agents", func() { + BeforeEach(func() { + ctx = context.Background() + memberClusterName = utils.RandStr() + namespaceName = fmt.Sprintf(utils.NamespaceNameFormat, memberClusterName) + memberClusterNamespacedName = types.NamespacedName{ Name: memberClusterName, - }, - Spec: fleetv1alpha1.MemberClusterSpec{ - State: fleetv1alpha1.ClusterStateJoin, - Identity: rbacv1.Subject{ - Kind: rbacv1.ServiceAccountKind, - Name: "hub-access", + } + + By("create the member cluster reconciler") + r = &Reconciler{ + Client: k8sClient, + } + err := r.SetupWithManager(mgr) + Expect(err).Should(Succeed()) + + By("create member cluster for join") + mc := &fleetv1alpha1.MemberCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "MemberCluster", + APIVersion: fleetv1alpha1.GroupVersion.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: memberClusterName, }, - }, - } - Expect(k8sClient.Create(ctx, mc)).Should(Succeed()) + Spec: fleetv1alpha1.MemberClusterSpec{ + State: fleetv1alpha1.ClusterStateJoin, + Identity: rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Name: "hub-access", + }, + }, + } + Expect(k8sClient.Create(ctx, mc)).Should(Succeed()) + + By("trigger reconcile to initiate the join workflow") + result, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + var ns corev1.Namespace + var role rbacv1.Role + var roleBinding rbacv1.RoleBinding + var imc fleetv1alpha1.InternalMemberCluster + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: namespaceName}, &ns)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(utils.RoleNameFormat, memberClusterName), Namespace: namespaceName}, &role)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(utils.RoleBindingNameFormat, memberClusterName), Namespace: namespaceName}, &roleBinding)).Should(Succeed()) + + By("simulate member agent updating internal member cluster status") + imc.Status.ResourceUsage.Capacity = utils.NewResourceList() + imc.Status.ResourceUsage.Allocatable = utils.NewResourceList() + imc.Status.ResourceUsage.ObservationTime = metav1.Now() + joinedCondition := metav1.Condition{ + Type: 
string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterJoined, + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, joinedCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to update member cluster status to joined") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + }) - By("trigger reconcile to initiate the join workflow") - result, err := r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, + AfterEach(func() { + var ns corev1.Namespace + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: namespaceName}, &ns)).Should(Succeed()) + + By("Deleting the namespace") + Eventually(func() error { + return k8sClient.Delete(ctx, &ns) + }, timeout, interval).Should(SatisfyAny(Succeed(), &utils.NotFoundMatcher{})) }) - Expect(result).Should(Equal(ctrl.Result{})) - Expect(err).Should(Not(HaveOccurred())) - - var ns corev1.Namespace - var role rbacv1.Role - var roleBinding rbacv1.RoleBinding - var imc fleetv1alpha1.InternalMemberCluster - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: namespaceName}, &ns)).Should(Succeed()) - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(utils.RoleNameFormat, memberClusterName), Namespace: namespaceName}, &role)).Should(Succeed()) - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(utils.RoleBindingNameFormat, memberClusterName), Namespace: namespaceName}, &roleBinding)).Should(Succeed()) - - By("simulate member agent updating internal member cluster status") - imc.Status.ResourceUsage.Capacity = utils.NewResourceList() - imc.Status.ResourceUsage.Allocatable = utils.NewResourceList() - imc.Status.ResourceUsage.ObservationTime = metav1.Now() - joinedCondition := metav1.Condition{ - Type: string(fleetv1alpha1.AgentJoined), - Status: metav1.ConditionTrue, - Reason: reasonMemberClusterJoined, - ObservedGeneration: imc.GetGeneration(), - } - heartBeatReceivedCondition := metav1.Condition{ - Type: string(fleetv1alpha1.AgentHealthy), - Status: metav1.ConditionTrue, - Reason: "InternalMemberClusterHeartbeatReceived", - ObservedGeneration: imc.GetGeneration(), - } - imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, joinedCondition, heartBeatReceivedCondition) - Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) - - By("trigger reconcile again to update member cluster status to joined") - result, err = r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, + + It("should create namespace, role, role binding and internal member cluster & mark member cluster as joined", func() { + var mc fleetv1alpha1.MemberCluster + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + readyToJoinCondition := mc.GetCondition(fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin) + Expect(readyToJoinCondition).NotTo(BeNil()) + Expect(readyToJoinCondition.Status).To(Equal(metav1.ConditionTrue)) + Expect(readyToJoinCondition.Reason).To(Equal(reasonMemberClusterReadyToJoin)) + + joinCondition := mc.GetCondition(fleetv1alpha1.ConditionTypeMemberClusterJoin) + Expect(joinCondition).NotTo(BeNil()) + Expect(joinCondition.Status).To(Equal(metav1.ConditionTrue)) + 
Expect(joinCondition.Reason).To(Equal(reasonMemberClusterJoined)) }) - Expect(result).Should(Equal(ctrl.Result{})) - Expect(err).Should(Not(HaveOccurred())) - }) - AfterEach(func() { - var ns corev1.Namespace - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: namespaceName}, &ns)).Should(Succeed()) + It("member cluster is marked as left after leave workflow is completed", func() { + By("Update member cluster's spec to leave") + var mc fleetv1alpha1.MemberCluster + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + mc.Spec.State = fleetv1alpha1.ClusterStateLeave + Expect(k8sClient.Update(ctx, &mc)) - By("Deleting the namespace") - Eventually(func() error { - return k8sClient.Delete(ctx, &ns) - }, timeout, interval).Should(SatisfyAny(Succeed(), &utils.NotFoundMatcher{})) - }) + By("trigger reconcile again to initiate leave workflow") + result, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + var imc fleetv1alpha1.InternalMemberCluster + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + Expect(imc.Spec.State).To(Equal(fleetv1alpha1.ClusterStateLeave)) - It("should create namespace, role, role binding and internal member cluster & mark member cluster as joined", func() { - var mc fleetv1alpha1.MemberCluster - Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + By("mark Internal Member Cluster as left") + imcLeftCondition := metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: "InternalMemberClusterLeft", + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, imcLeftCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) - readyToJoinCondition := mc.GetCondition(fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin) - Expect(readyToJoinCondition).NotTo(BeNil()) - Expect(readyToJoinCondition.Status).To(Equal(metav1.ConditionTrue)) - Expect(readyToJoinCondition.Reason).To(Equal(reasonMemberClusterReadyToJoin)) + By("trigger reconcile again to mark member cluster as left") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) - joinCondition := mc.GetCondition(fleetv1alpha1.ConditionTypeMemberClusterJoin) - Expect(joinCondition).NotTo(BeNil()) - Expect(joinCondition.Status).To(Equal(metav1.ConditionTrue)) - Expect(joinCondition.Reason).To(Equal(reasonMemberClusterJoined)) + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + mcLeftCondition := mc.GetCondition(fleetv1alpha1.ConditionTypeMemberClusterJoin) + Expect(mcLeftCondition.Status).To(Equal(metav1.ConditionFalse)) + Expect(mcLeftCondition.Reason).To(Equal(reasonMemberClusterLeft)) + }) }) - It("member cluster is marked as left after leave workflow is completed", func() { - By("Update member cluster's spec to leave") - var mc fleetv1alpha1.MemberCluster - Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) - mc.Spec.State = fleetv1alpha1.ClusterStateLeave - Expect(k8sClient.Update(ctx, &mc)) + Context("Test membercluster controller with enabling networking agents", func() { + BeforeEach(func() { + ctx = context.Background() + memberClusterName = utils.RandStr() + namespaceName = 
fmt.Sprintf(utils.NamespaceNameFormat, memberClusterName) + memberClusterNamespacedName = types.NamespacedName{ + Name: memberClusterName, + } + + By("create the member cluster reconciler") + r = &Reconciler{ + Client: k8sClient, + NetworkingAgentsEnabled: true, + } + err := r.SetupWithManager(mgr) + Expect(err).Should(Succeed()) + + By("create member cluster for join") + mc := &fleetv1alpha1.MemberCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "MemberCluster", + APIVersion: fleetv1alpha1.GroupVersion.Version, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: memberClusterName, + }, + Spec: fleetv1alpha1.MemberClusterSpec{ + State: fleetv1alpha1.ClusterStateJoin, + Identity: rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + Name: "hub-access", + }, + }, + } + Expect(k8sClient.Create(ctx, mc)).Should(Succeed()) + + By("trigger reconcile to initiate the join workflow") + result, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + var ns corev1.Namespace + var role rbacv1.Role + var roleBinding rbacv1.RoleBinding + var imc fleetv1alpha1.InternalMemberCluster + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: namespaceName}, &ns)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(utils.RoleNameFormat, memberClusterName), Namespace: namespaceName}, &role)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(utils.RoleBindingNameFormat, memberClusterName), Namespace: namespaceName}, &roleBinding)).Should(Succeed()) + + By("simulate member agent updating internal member cluster status") + imc.Status.ResourceUsage.Capacity = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + } + imc.Status.ResourceUsage.Allocatable = corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + } + imc.Status.ResourceUsage.ObservationTime = metav1.Now() + joinedCondition := metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterJoined, + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, joinedCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to update member cluster status to joined") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + }) + + AfterEach(func() { + var ns corev1.Namespace + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: namespaceName}, &ns)).Should(Succeed()) - By("trigger reconcile again to initiate leave workflow") - result, err := r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, + By("Deleting the namespace") + Eventually(func() error { + return k8sClient.Delete(ctx, &ns) + }, timeout, interval).Should(SatisfyAny(Succeed(), &utils.NotFoundMatcher{})) }) - Expect(result).Should(Equal(ctrl.Result{})) - Expect(err).Should(Not(HaveOccurred())) - - var imc fleetv1alpha1.InternalMemberCluster - Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) - Expect(imc.Spec.State).To(Equal(fleetv1alpha1.ClusterStateLeave)) - - By("mark Internal Member Cluster as 
left") - imcLeftCondition := metav1.Condition{ - Type: string(fleetv1alpha1.AgentJoined), - Status: metav1.ConditionFalse, - Reason: "InternalMemberClusterLeft", - ObservedGeneration: imc.GetGeneration(), - } - imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, imcLeftCondition) - Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) - - By("trigger reconcile again to mark member cluster as left") - result, err = r.Reconcile(ctx, ctrl.Request{ - NamespacedName: memberClusterNamespacedName, + + It("should create namespace, role, role binding and internal member cluster & mark member cluster as joined", func() { + By("getting imc status") + var imc fleetv1alpha1.InternalMemberCluster + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + + var mc fleetv1alpha1.MemberCluster + By("checking mc status") + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + wantMC := fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterReadyToJoin, + ObservedGeneration: mc.GetGeneration(), + }, + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + ObservedGeneration: mc.GetGeneration(), + }, + }, + ResourceUsage: imc.Status.ResourceUsage, + AgentStatus: imc.Status.AgentStatus, + } + Expect(cmp.Diff(wantMC, mc.Status, ignoreOption)).Should(BeEmpty()) + + By("simulate multiClusterService agent updating internal member cluster status as joined") + joinedCondition := metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterJoined, + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.MultiClusterServiceAgent, joinedCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to update member cluster status") + result, err := r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + By("getting imc status") + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + + By("checking mc status") + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + wantMC = fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterReadyToJoin, + ObservedGeneration: mc.GetGeneration(), + }, + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + ObservedGeneration: mc.GetGeneration(), + }, + }, + ResourceUsage: imc.Status.ResourceUsage, + AgentStatus: imc.Status.AgentStatus, + } + Expect(cmp.Diff(wantMC, mc.Status, ignoreOption)).Should(BeEmpty()) + + By("simulate serviceExportImport agent updating internal member cluster status as unknown") + joinedCondition = metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.ServiceExportImportAgent, joinedCondition) + 
Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to update member cluster status") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + By("getting imc status") + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + + By("checking mc status") + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + wantMC = fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterReadyToJoin, + ObservedGeneration: mc.GetGeneration(), + }, + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + ObservedGeneration: mc.GetGeneration(), + }, + }, + ResourceUsage: imc.Status.ResourceUsage, + AgentStatus: imc.Status.AgentStatus, + } + Expect(cmp.Diff(wantMC, mc.Status, ignoreOption)).Should(BeEmpty()) + + By("simulate serviceExportImport agent updating internal member cluster status as joined") + joinedCondition = metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterJoined, + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.ServiceExportImportAgent, joinedCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to update member cluster status") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + By("getting imc status") + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + + By("checking mc status") + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + wantMC = fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterReadyToJoin, + ObservedGeneration: mc.GetGeneration(), + }, + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterJoined, + ObservedGeneration: mc.GetGeneration(), + }, + }, + ResourceUsage: imc.Status.ResourceUsage, + AgentStatus: imc.Status.AgentStatus, + } + Expect(cmp.Diff(wantMC, mc.Status, ignoreOption)).Should(BeEmpty()) }) - Expect(result).Should(Equal(ctrl.Result{})) - Expect(err).Should(Not(HaveOccurred())) - Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) - mcLeftCondition := mc.GetCondition(fleetv1alpha1.ConditionTypeMemberClusterJoin) - Expect(mcLeftCondition.Status).To(Equal(metav1.ConditionFalse)) - Expect(mcLeftCondition.Reason).To(Equal(reasonMemberClusterLeft)) + It("member cluster is marked as left after leave workflow is completed", func() { + By("Update member cluster's spec to leave") + var mc fleetv1alpha1.MemberCluster + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + mc.Spec.State = fleetv1alpha1.ClusterStateLeave + Expect(k8sClient.Update(ctx, &mc)) + + By("trigger reconcile again to initiate leave workflow") + result, err := r.Reconcile(ctx, 
ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + var imc fleetv1alpha1.InternalMemberCluster + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + Expect(imc.Spec.State).To(Equal(fleetv1alpha1.ClusterStateLeave)) + + By("getting imc status") + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: memberClusterName, Namespace: namespaceName}, &imc)).Should(Succeed()) + + By("member agent marks Internal Member Cluster as left") + imcLeftCondition := metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: "InternalMemberClusterLeft", + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, imcLeftCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to update member cluster status") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + By("checking mc status") + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + wantMC := fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterReadyToJoin, + ObservedGeneration: mc.GetGeneration(), // should be old observedGeneration + }, + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + ObservedGeneration: mc.GetGeneration(), + }, + }, + ResourceUsage: imc.Status.ResourceUsage, + AgentStatus: imc.Status.AgentStatus, + } + options := cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration") + // Ignore the ObservedGeneration here because the controller won't update the ReadyToJoin condition.
+ Expect(cmp.Diff(wantMC, mc.Status, options)).Should(BeEmpty()) + + By("multiClusterService agent marks Internal Member Cluster as joined") + imcLeftCondition = metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "InternalMemberClusterJoined", + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.MultiClusterServiceAgent, imcLeftCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to update member cluster status") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + By("checking mc status") + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + wantMC = fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterReadyToJoin, + ObservedGeneration: mc.GetGeneration(), // should be old observedGeneration + }, + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + ObservedGeneration: mc.GetGeneration(), + }, + }, + ResourceUsage: imc.Status.ResourceUsage, + AgentStatus: imc.Status.AgentStatus, + } + // Ignore the ObservedGeneration here because the controller won't update the ReadyToJoin condition. + Expect(cmp.Diff(wantMC, mc.Status, options)).Should(BeEmpty()) + + By("multiClusterService and serviceExportImport agents mark Internal Member Cluster as left") + imcLeftCondition = metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: "InternalMemberClusterLeft", + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.MultiClusterServiceAgent, imcLeftCondition) + + imcLeftCondition = metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: "InternalMemberClusterLeft", + ObservedGeneration: imc.GetGeneration(), + } + imc.SetConditionsWithType(fleetv1alpha1.ServiceExportImportAgent, imcLeftCondition) + Expect(k8sClient.Status().Update(ctx, &imc)).Should(Succeed()) + + By("trigger reconcile again to mark member cluster as left") + result, err = r.Reconcile(ctx, ctrl.Request{ + NamespacedName: memberClusterNamespacedName, + }) + Expect(result).Should(Equal(ctrl.Result{})) + Expect(err).Should(Succeed()) + + By("checking mc status") + Expect(k8sClient.Get(ctx, memberClusterNamespacedName, &mc)).Should(Succeed()) + + wantMC = fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterReadyToJoin, + ObservedGeneration: mc.GetGeneration(), // should be old observedGeneration + }, + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionFalse, + Reason: reasonMemberClusterLeft, + ObservedGeneration: mc.GetGeneration(), + }, + }, + ResourceUsage: imc.Status.ResourceUsage, + AgentStatus: imc.Status.AgentStatus, + } + // Ignore the ObservedGeneration here because the controller won't update the ReadyToJoin condition.
+ Expect(cmp.Diff(wantMC, mc.Status, options)).Should(BeEmpty()) + }) }) + }) diff --git a/pkg/controllers/membercluster/membercluster_controller_test.go b/pkg/controllers/membercluster/membercluster_controller_test.go index 1c3fc8314..ab5cce290 100644 --- a/pkg/controllers/membercluster/membercluster_controller_test.go +++ b/pkg/controllers/membercluster/membercluster_controller_test.go @@ -636,7 +636,13 @@ func TestSyncInternalMemberClusterStatus(t *testing.T) { wantedMemberCluster *fleetv1alpha1.MemberCluster }{ "copy with Joined condition": { - r: &Reconciler{recorder: utils.NewFakeRecorder(1), numberOfAgents: 2}, + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + fleetv1alpha1.ServiceExportImportAgent: "", + }, + }, internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ Status: fleetv1alpha1.InternalMemberClusterStatus{ ResourceUsage: fleetv1alpha1.ResourceUsage{ @@ -721,7 +727,12 @@ func TestSyncInternalMemberClusterStatus(t *testing.T) { }, }, "copy with Left condition": { - r: &Reconciler{recorder: utils.NewFakeRecorder(1), numberOfAgents: 1}, + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + }, + }, internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ Status: fleetv1alpha1.InternalMemberClusterStatus{ ResourceUsage: fleetv1alpha1.ResourceUsage{ @@ -806,7 +817,13 @@ func TestSyncInternalMemberClusterStatus(t *testing.T) { }, }, "copy with Unknown condition": { - r: &Reconciler{recorder: utils.NewFakeRecorder(1), numberOfAgents: 1}, + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + fleetv1alpha1.ServiceExportImportAgent: "", + }, + }, internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ Status: fleetv1alpha1.InternalMemberClusterStatus{ ResourceUsage: fleetv1alpha1.ResourceUsage{ @@ -825,7 +842,7 @@ func TestSyncInternalMemberClusterStatus(t *testing.T) { { Type: string(fleetv1alpha1.AgentJoined), Status: metav1.ConditionTrue, - Reason: "Left", + Reason: "Joined", }, }, LastReceivedHeartbeat: now, @@ -870,7 +887,7 @@ func TestSyncInternalMemberClusterStatus(t *testing.T) { { Type: string(fleetv1alpha1.AgentJoined), Status: metav1.ConditionTrue, - Reason: "Left", + Reason: "Joined", }, }, LastReceivedHeartbeat: now, @@ -891,7 +908,12 @@ func TestSyncInternalMemberClusterStatus(t *testing.T) { }, }, "No Agent Status": { - r: &Reconciler{recorder: utils.NewFakeRecorder(1), numberOfAgents: 1}, + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + }, + }, internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ Status: fleetv1alpha1.InternalMemberClusterStatus{ ResourceUsage: fleetv1alpha1.ResourceUsage{ @@ -917,15 +939,363 @@ func TestSyncInternalMemberClusterStatus(t *testing.T) { }, ObservationTime: now, }, + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + }, + }, }, }, }, "Internal member cluster is nil": { - r: &Reconciler{recorder: utils.NewFakeRecorder(1), numberOfAgents: 1}, + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + }, + }, internalMemberCluster: nil, memberCluster: &fleetv1alpha1.MemberCluster{}, 
wantedMemberCluster: &fleetv1alpha1.MemberCluster{}, }, + "other agent type reported in the status and should be ignored": { + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + fleetv1alpha1.ServiceExportImportAgent: "", + }, + }, + internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ + Status: fleetv1alpha1.InternalMemberClusterStatus{ + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.ServiceExportImportAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.MultiClusterServiceAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: "Left", + }, + }, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + memberCluster: &fleetv1alpha1.MemberCluster{}, + wantedMemberCluster: &fleetv1alpha1.MemberCluster{ + Status: fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionTrue, + Reason: reasonMemberClusterJoined, + }, + }, + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.ServiceExportImportAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.MultiClusterServiceAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionFalse, + Reason: "Left", + }, + }, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + }, + "less agent type reported in the status": { + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + fleetv1alpha1.ServiceExportImportAgent: "", + }, + }, + internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ + Status: fleetv1alpha1.InternalMemberClusterStatus{ + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: 
"Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + memberCluster: &fleetv1alpha1.MemberCluster{}, + wantedMemberCluster: &fleetv1alpha1.MemberCluster{ + Status: fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + }, + }, + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + }, + "condition is not reported in the status": { + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + fleetv1alpha1.ServiceExportImportAgent: "", + }, + }, + internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ + Status: fleetv1alpha1.InternalMemberClusterStatus{ + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.ServiceExportImportAgent, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + memberCluster: &fleetv1alpha1.MemberCluster{}, + wantedMemberCluster: &fleetv1alpha1.MemberCluster{ + Status: fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + }, + }, + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.ServiceExportImportAgent, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + }, + "agent type is not reported in the status": { + r: &Reconciler{ + recorder: utils.NewFakeRecorder(1), + agents: map[fleetv1alpha1.AgentType]string{ + fleetv1alpha1.MemberAgent: "", + fleetv1alpha1.ServiceExportImportAgent: "", + }, + }, + internalMemberCluster: &fleetv1alpha1.InternalMemberCluster{ + Status: fleetv1alpha1.InternalMemberClusterStatus{ + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + 
Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.MultiClusterServiceAgent, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + memberCluster: &fleetv1alpha1.MemberCluster{}, + wantedMemberCluster: &fleetv1alpha1.MemberCluster{ + Status: fleetv1alpha1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1alpha1.ConditionTypeMemberClusterJoin, + Status: metav1.ConditionUnknown, + Reason: reasonMemberClusterUnknown, + }, + }, + ResourceUsage: fleetv1alpha1.ResourceUsage{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + ObservationTime: now, + }, + AgentStatus: []fleetv1alpha1.AgentStatus{ + { + Type: fleetv1alpha1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + }, + }, + LastReceivedHeartbeat: now, + }, + { + Type: fleetv1alpha1.MultiClusterServiceAgent, + LastReceivedHeartbeat: now, + }, + }, + }, + }, + }, } for testName, tt := range tests {