Skip to content

Commit

Permalink
Add test cases for notifications on controlled and completed pod statuses
Browse files Browse the repository at this point in the history
Signed-off-by: Feny Mehta <fbm3307@gmail.com>
  • Loading branch information
fbm3307 committed Oct 25, 2023
1 parent 826d302 commit 56bf1e7
Show file tree
Hide file tree
Showing 2 changed files with 72 additions and 9 deletions.
6 changes: 4 additions & 2 deletions controllers/idler/idler_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,7 @@ func (r *Reconciler) ensureIdling(logger logr.Logger, idler *toolchainv1alpha1.I
podreason = podtype.Reason
}
}

// Check if it belongs to a controller (Deployment, DeploymentConfig, etc) and scale it down to zero.
appType, appName, deletedByController, err := r.scaleControllerToZero(podLogger, pod.ObjectMeta)
if err != nil {
Expand All @@ -156,8 +157,9 @@ func (r *Reconciler) ensureIdling(logger logr.Logger, idler *toolchainv1alpha1.I
appName = pod.Name
appType = "Pod"
}
// Do not send notification if Pod Not managed by a controller and is in completed state
if podreason != "PodCompleted" && !deletedByController {
// Send notification if the deleted pod was managed by a controller or was a standalone pod that was not completed
// eg. If a build pod is in "PodCompleted" status then it was not running so there's no reason to send an idler notification
if podreason != "PodCompleted" || deletedByController {
// By now either a pod has been deleted or scaled to zero by controller, idler Triggered notification should be sent
if err := r.createNotification(logger, idler, appName, appType); err != nil {
logger.Error(err, "failed to create Notification")
Expand Down
75 changes: 68 additions & 7 deletions controllers/idler/idler_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -682,7 +682,7 @@ func TestAppNameTypeForControllers(t *testing.T) {
}
}

func TestAppNameTypeForInidividualPods(t *testing.T) {
func TestNotificationAppNameTypeForPods(t *testing.T) {
//given
idler := &toolchainv1alpha1.Idler{
ObjectMeta: metav1.ObjectMeta{
Expand All @@ -698,10 +698,11 @@ func TestAppNameTypeForInidividualPods(t *testing.T) {
nsTmplSet := newNSTmplSet(test.MemberOperatorNs, "feny", "advanced", "abcde11", namespaces, usernames)
mur := newMUR("feny")

t.Run("Test AppName/Type in notification", func(t *testing.T) {
t.Run("Test AppName/Type in notification for individual pod in non-completed status", func(t *testing.T) {
reconciler, req, cl, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur)
idlerTimeoutPlusOneSecondAgo := time.Now().Add(-time.Duration(idler.Spec.TimeoutSeconds+1) * time.Second)
p := preparePayloadsSinglePod(t, reconciler, idler.Name, "todelete-", idlerTimeoutPlusOneSecondAgo).standalonePods[0]
pcond := corev1.PodCondition{Type: "Ready", Reason: ""}
p := preparePayloadsSinglePod(t, reconciler, idler.Name, "todelete-", idlerTimeoutPlusOneSecondAgo, pcond).standalonePods[0]

// first reconcile to track pods
res, err := reconciler.Reconcile(context.TODO(), req)
Expand Down Expand Up @@ -756,6 +757,66 @@ func TestAppNameTypeForInidividualPods(t *testing.T) {

})

t.Run("Test AppName/Type in notification for controlled pod in non-completed status", func(t *testing.T) {
	// given: Deployment-controlled pods whose start time already exceeds the idler
	// timeout, with a "Ready" condition whose Reason is empty (i.e. NOT "PodCompleted"),
	// so the idler is expected to scale the controller down and send a notification.
	reconciler, req, cl, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur)
	idlerTimeoutPlusOneSecondAgo := time.Now().Add(-time.Duration(idler.Spec.TimeoutSeconds+1) * time.Second)
	pcond := corev1.PodCondition{Type: "Ready", Reason: ""}
	preparePayloads(t, reconciler, idler.Name, "todelete-", idlerTimeoutPlusOneSecondAgo, pcond)

	// first reconcile only starts tracking the pods and requeues
	res, err := reconciler.Reconcile(context.TODO(), req)
	assert.NoError(t, err)
	assert.True(t, res.Requeue)

	// second reconcile should delete/scale down the pods and create the notification;
	// the returned result is not asserted here, so discard it instead of reassigning res
	_, err = reconciler.Reconcile(context.TODO(), req)
	// then
	assert.NoError(t, err)
	memberoperatortest.AssertThatIdler(t, idler.Name, cl).
		HasConditions(memberoperatortest.Running(), memberoperatortest.IdlerNotificationCreated())
	// check the notification is actually created on the host cluster
	hostCl, _ := reconciler.GetHostCluster()
	notification := &toolchainv1alpha1.Notification{}
	err = hostCl.Client.Get(context.TODO(), types.NamespacedName{
		Namespace: test.HostOperatorNs,
		Name:      "feny-stage-idled",
	}, notification)
	require.NoError(t, err)
	require.Equal(t, "feny@test.com", notification.Spec.Recipient)
	require.Equal(t, "idled", notification.Labels[toolchainv1alpha1.NotificationTypeLabelKey])
	// the pod is managed by a Deployment, so AppType must report the controller kind
	require.Equal(t, "Deployment", notification.Spec.Context["AppType"])
})

t.Run("Test AppName/Type in notification for controlled pod in completed status", func(t *testing.T) {
	// given: Deployment-controlled pods past the idler timeout whose "Ready" condition
	// Reason is "PodCompleted". Even though the pods are completed, they are managed by
	// a controller, so the idler is still expected to send the idled notification
	// (only standalone completed pods are exempt from notification).
	reconciler, req, cl, _ := prepareReconcile(t, idler.Name, getHostCluster, idler, nsTmplSet, mur)
	idlerTimeoutPlusOneSecondAgo := time.Now().Add(-time.Duration(idler.Spec.TimeoutSeconds+1) * time.Second)
	pcond := corev1.PodCondition{Type: "Ready", Reason: "PodCompleted"}
	preparePayloads(t, reconciler, idler.Name, "todelete-", idlerTimeoutPlusOneSecondAgo, pcond)

	// first reconcile only starts tracking the pods and requeues
	res, err := reconciler.Reconcile(context.TODO(), req)
	assert.NoError(t, err)
	assert.True(t, res.Requeue)

	// second reconcile should delete/scale down the pods and create the notification;
	// the returned result is not asserted here, so discard it instead of reassigning res
	_, err = reconciler.Reconcile(context.TODO(), req)
	// then
	assert.NoError(t, err)
	memberoperatortest.AssertThatIdler(t, idler.Name, cl).
		HasConditions(memberoperatortest.Running(), memberoperatortest.IdlerNotificationCreated())
	// check the notification is actually created on the host cluster
	hostCl, _ := reconciler.GetHostCluster()
	notification := &toolchainv1alpha1.Notification{}
	err = hostCl.Client.Get(context.TODO(), types.NamespacedName{
		Namespace: test.HostOperatorNs,
		Name:      "feny-stage-idled",
	}, notification)
	require.NoError(t, err)
	require.Equal(t, "feny@test.com", notification.Spec.Recipient)
	require.Equal(t, "idled", notification.Labels[toolchainv1alpha1.NotificationTypeLabelKey])
	// the pod is managed by a Deployment, so AppType must report the controller kind
	require.Equal(t, "Deployment", notification.Spec.Context["AppType"])
})

}
func TestCreateNotification(t *testing.T) {
idler := &toolchainv1alpha1.Idler{
Expand Down Expand Up @@ -988,7 +1049,7 @@ type payloads struct {
job *batchv1.Job
}

func preparePayloads(t *testing.T, r *Reconciler, namespace, namePrefix string, startTime time.Time) payloads {
func preparePayloads(t *testing.T, r *Reconciler, namespace, namePrefix string, startTime time.Time, conditions ...corev1.PodCondition) payloads {
sTime := metav1.NewTime(startTime)
replicas := int32(3)

Expand All @@ -1007,7 +1068,7 @@ func preparePayloads(t *testing.T, r *Reconciler, namespace, namePrefix string,
require.NoError(t, err)
err = r.AllNamespacesClient.Create(context.TODO(), rs)
require.NoError(t, err)
controlledPods := createPods(t, r, rs, sTime, make([]*corev1.Pod, 0, 3))
controlledPods := createPods(t, r, rs, sTime, make([]*corev1.Pod, 0, 3), conditions...)

// Deployment with Camel K integration as an owner reference and a scale sub resource
integration := &appsv1.Deployment{
Expand Down Expand Up @@ -1182,11 +1243,11 @@ func preparePayloadsSinglePod(t *testing.T, r *Reconciler, namespace, namePrefix
}
}

func createPods(t *testing.T, r *Reconciler, owner metav1.Object, startTime metav1.Time, podsToTrack []*corev1.Pod) []*corev1.Pod {
func createPods(t *testing.T, r *Reconciler, owner metav1.Object, startTime metav1.Time, podsToTrack []*corev1.Pod, conditions ...corev1.PodCondition) []*corev1.Pod {
for i := 0; i < 3; i++ {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pod-%d", owner.GetName(), i), Namespace: owner.GetNamespace()},
Status: corev1.PodStatus{StartTime: &startTime},
Status: corev1.PodStatus{StartTime: &startTime, Conditions: conditions},
}
err := controllerutil.SetControllerReference(owner, pod, r.Scheme)
require.NoError(t, err)
Expand Down

0 comments on commit 56bf1e7

Please sign in to comment.