
unit test cases fixed
Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com>
vivekr-splunk committed Aug 14, 2023
1 parent e25479c commit 29ec2bb
Showing 12 changed files with 177 additions and 55 deletions.
4 changes: 2 additions & 2 deletions pkg/splunk/controller/statefulset.go
@@ -353,8 +353,8 @@ func DeleteReferencesToAutomatedMCIfExists(ctx context.Context, client splcommon
func isCurrentCROwner(cr splcommon.MetaObject, currentOwners []metav1.OwnerReference) bool {
// adding extra verification as unit test cases fails since fakeclient do not set UID
return reflect.DeepEqual(currentOwners[0].UID, cr.GetUID()) &&
(currentOwners[0].Kind == cr.GetObjectKind().GroupVersionKind().Kind) &&
(currentOwners[0].Name == cr.GetName())
(currentOwners[0].Kind == cr.GetObjectKind().GroupVersionKind().Kind) &&
(currentOwners[0].Name == cr.GetName())
}

// IsStatefulSetScalingUpOrDown checks if we are currently scaling up or down
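A note on the statefulset.go change above: the in-code comment records that the fake client used by the unit tests never assigns a UID, which is why isCurrentCROwner compares Kind and Name in addition to UID. A minimal sketch of that fake-client behavior (illustrative only, not part of this commit; it assumes the controller-runtime fake package):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	c := fake.NewClientBuilder().Build()
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}}
	_ = c.Create(context.TODO(), cm)

	// A real API server assigns a UID on create; the fake client leaves it
	// empty, so a UID-only owner comparison can degenerate to "" == "".
	fmt.Printf("UID after Create: %q\n", cm.GetUID()) // prints ""
}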
1 change: 0 additions & 1 deletion pkg/splunk/enterprise/clustermanager.go
@@ -181,7 +181,6 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
return result, err
}


// check if the ClusterManager is ready for version upgrade, if required
continueReconcile, err := UpgradePathValidation(ctx, client, cr, cr.Spec.CommonSplunkSpec, nil)
if err != nil || !continueReconcile {
43 changes: 39 additions & 4 deletions pkg/splunk/enterprise/clustermanager_test.go
@@ -1421,6 +1421,14 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) {
if err != nil {
t.Errorf("applyLicenseManager should not have returned error; err=%v", err)
}
namespacedName := types.NamespacedName{
Name: "test",
Namespace: "test",
}
err = client.Get(ctx, namespacedName, &lm)
if err != nil {
t.Errorf("get should not have returned error; err=%v", err)
}
lm.Status.Phase = enterpriseApi.PhaseReady
err = client.Status().Update(ctx, &lm)
if err != nil {
@@ -1454,21 +1462,33 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}

cm.Spec.Image = "splunk2"
// create pods for license manager
lm.Status.TelAppInstalled = true
lm.Spec.Image = "splunk2"
createPods(t, ctx, client, "license-manager", fmt.Sprintf("splunk-%s-license-manager-0", lm.Name), lm.Namespace, lm.Spec.Image)
updateStatefulSetsInTest(t, ctx, client, 1, fmt.Sprintf("splunk-%s-license-manager", lm.Name), lm.Namespace)
// now the statefulset image in spec is updated to splunk2
_, err = ApplyLicenseManager(ctx, client, &lm)

// now the statefulset and license manager both should be in ready state
_, err = ApplyLicenseManager(ctx, client, &lm)

clusterManager := &enterpriseApi.ClusterManager{}
namespacedName := types.NamespacedName{
namespacedName = types.NamespacedName{
Name: cm.Name,
Namespace: cm.Namespace,
}
err = client.Get(ctx, namespacedName, clusterManager)
if err != nil {
t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err)
}
clusterManager.Spec.Image = "splunk2"
err = client.Update(ctx, clusterManager)
if err != nil {
t.Errorf("update should not have returned error; err=%v", err)
}

check, err := isClusterManagerReadyForUpgrade(ctx, client, clusterManager)
check, err := UpgradePathValidation(ctx, client, clusterManager, clusterManager.Spec.CommonSplunkSpec, nil)

if err != nil {
t.Errorf("Unexpected upgradeScenario error %v", err)
@@ -1527,6 +1547,15 @@ func TestChangeClusterManagerAnnotations(t *testing.T) {
if err != nil {
t.Errorf("applyLicenseManager should not have returned error; err=%v", err)
}

namespacedName := types.NamespacedName{
Name: lm.Name,
Namespace: lm.Namespace,
}
err = client.Get(ctx, namespacedName, lm)
if err != nil {
t.Errorf("changeLicenseManagerAnnotations should not have returned error=%v", err)
}
lm.Status.Phase = enterpriseApi.PhaseReady
err = client.Status().Update(ctx, lm)
if err != nil {
@@ -1544,7 +1573,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) {
t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err)
}
clusterManager := &enterpriseApi.ClusterManager{}
namespacedName := types.NamespacedName{
namespacedName = types.NamespacedName{
Name: cm.Name,
Namespace: cm.Namespace,
}
@@ -1685,6 +1714,12 @@ func TestClusterManagerWitReadyState(t *testing.T) {
Namespace: clustermanager.Namespace,
}

// cluster manager
err = c.Get(ctx, namespacedName, clustermanager)
if err != nil {
t.Errorf("get should not have returned error; err=%v", err)
}

// simulate Ready state
clustermanager.Status.Phase = enterpriseApi.PhaseReady
clustermanager.Spec.ServiceTemplate.Annotations = map[string]string{
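The additions in this test file all follow one pattern, which repeats in the clustermaster, indexercluster, licensemanager, and monitoringconsole tests below: re-fetch the object from the fake client before mutating it and calling Update or Status().Update, so the write carries the resourceVersion the client actually stored. A self-contained sketch of that pattern, using a core Pod instead of the operator's CRDs (all names here are illustrative, not the operator's):

package enterprise

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestGetBeforeStatusUpdate(t *testing.T) {
	ctx := context.TODO()
	// WithStatusSubresource is required on controller-runtime >= 0.15 for
	// Status().Update to take effect; on older releases drop this call.
	c := fake.NewClientBuilder().WithStatusSubresource(&corev1.Pod{}).Build()

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"}}
	if err := c.Create(ctx, pod); err != nil {
		t.Fatalf("create failed: %v", err)
	}

	// Re-fetch before writing status so the local copy matches what the
	// fake client stored (fresh resourceVersion), mirroring the Get calls
	// added throughout this commit.
	key := types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}
	if err := c.Get(ctx, key, pod); err != nil {
		t.Fatalf("get failed: %v", err)
	}

	pod.Status.Phase = corev1.PodRunning
	if err := c.Status().Update(ctx, pod); err != nil {
		t.Fatalf("status update failed: %v", err)
	}
}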
6 changes: 5 additions & 1 deletion pkg/splunk/enterprise/clustermaster_test.go
@@ -1220,7 +1220,11 @@ func TestClusterMasterWitReadyState(t *testing.T) {
Name: clustermaster.Name,
Namespace: clustermaster.Namespace,
}

err = c.Get(ctx, namespacedName, clustermaster)
if err != nil {
t.Errorf("Unexpected get cluster master %v", err)
debug.PrintStack()
}
// simulate Ready state
clustermaster.Status.Phase = enterpriseApi.PhaseReady
clustermaster.Spec.ServiceTemplate.Annotations = map[string]string{
42 changes: 36 additions & 6 deletions pkg/splunk/enterprise/indexercluster_test.go
@@ -735,7 +735,8 @@ func TestIndexerClusterPodManager(t *testing.T) {
{MetaName: "*v1.StatefulSet-test-splunk-stack1"},
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Pod-test-splunk-stack1-indexer-0"},
//{MetaName: "*v1.Pod-test-splunk-stack1-indexer-0"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-stack1-0"},
}
@@ -750,7 +751,7 @@ func TestIndexerClusterPodManager(t *testing.T) {
listmockCall := []spltest.MockFuncCall{
{ListOpts: listOpts}}

wantCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[1], funcCalls[4], funcCalls[5]}, "Create": {funcCalls[1]}, "List": {listmockCall[0]}}
wantCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[1], funcCalls[4], funcCalls[4], funcCalls[5]}, "Create": {funcCalls[1]}, "List": {listmockCall[0]}}

// test 1 ready pod
mockHandlers := []spltest.MockHTTPHandler{
@@ -802,6 +803,7 @@ func TestIndexerClusterPodManager(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-stack1-0"},
{MetaName: "*v1.Pod-test-splunk-stack1-indexer-0"},
{MetaName: "*v1.Pod-test-splunk-stack1-indexer-0"},
@@ -815,6 +817,7 @@ func TestIndexerClusterPodManager(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-stack1-0"},
}
mockHandlers = []spltest.MockHTTPHandler{mockHandlers[0], mockHandlers[1]}
@@ -841,7 +844,7 @@ func TestIndexerClusterPodManager(t *testing.T) {
statefulSet.Status.Replicas = 2
statefulSet.Status.ReadyReplicas = 2
statefulSet.Status.UpdatedReplicas = 2
wantCalls = map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[1], funcCalls[4]}, "Create": {funcCalls[1]}}
wantCalls = map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[1], funcCalls[4], funcCalls[4]}, "Create": {funcCalls[1]}}
method = "indexerClusterPodManager.Update(Pod Not Found)"
indexerClusterPodManagerUpdateTester(t, method, mockHandlers, 1, enterpriseApi.PhaseScalingDown, statefulSet, wantCalls, nil, statefulSet, pod)

@@ -864,6 +867,7 @@ func TestIndexerClusterPodManager(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.Pod-test-splunk-manager1-cluster-manager-0"},
{MetaName: "*v1.PersistentVolumeClaim-test-pvc-etc-splunk-stack1-1"},
{MetaName: "*v1.PersistentVolumeClaim-test-pvc-var-splunk-stack1-1"},
}
@@ -1576,7 +1580,11 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
Name: clustermanager.Name,
Namespace: clustermanager.Namespace,
}

err := c.Get(ctx, namespacedName, clustermanager)
if err != nil {
t.Errorf("Unexpected get cluster manager %v", err)
debug.PrintStack()
}
clustermanager.Status.Phase = enterpriseApi.PhaseReady
clustermanager.Spec.ServiceTemplate.Annotations = map[string]string{
"traffic.sidecar.istio.io/excludeOutboundPorts": "8089,8191,9997",
@@ -1589,7 +1597,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
"app.kubernetes.io/name": "cluster-manager",
"app.kubernetes.io/part-of": "splunk-test-cluster-manager",
}
err := c.Status().Update(ctx, clustermanager)
err = c.Status().Update(ctx, clustermanager)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
debug.PrintStack()
@@ -1755,6 +1763,24 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
// simulate create clustermanager instance before reconcilation
c.Create(ctx, indexercluster)

GetClusterInfoCall = func(ctx context.Context, mgr *indexerClusterPodManager, mockCall bool) (*splclient.ClusterInfo, error) {
cinfo := &splclient.ClusterInfo{
MultiSite: "false",
}
return cinfo, nil
}
GetClusterManagerPeersCall = func(ctx context.Context, mgr *indexerClusterPodManager) (map[string]splclient.ClusterManagerPeerInfo, error) {
response := map[string]splclient.ClusterManagerPeerInfo{
"splunk-test-indexer-0": {
ID: "site-1",
Status: "Up",
ActiveBundleID: "1",
BucketCount: 10,
Searchable: true,
},
}
return response, err
}
_, err = ApplyIndexerClusterManager(ctx, c, indexercluster)
if err != nil {
t.Errorf("Unexpected error while running reconciliation for indexer cluster %v", err)
@@ -1765,7 +1791,11 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
Name: indexercluster.Name,
Namespace: indexercluster.Namespace,
}

err = c.Get(ctx, namespacedName, indexercluster)
if err != nil {
t.Errorf("Unexpected get indexer cluster %v", err)
debug.PrintStack()
}
// simulate Ready state
indexercluster.Status.Phase = enterpriseApi.PhaseReady
indexercluster.Spec.ServiceTemplate.Annotations = map[string]string{
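GetClusterInfoCall and GetClusterManagerPeersCall in the hunk above are package-level function variables, so the test can swap in stubs and keep the reconcile path away from a real cluster-manager REST endpoint. A stripped-down sketch of that seam (types and names here are stand-ins, not the operator's):

package main

import (
	"errors"
	"fmt"
)

type clusterInfo struct{ MultiSite string }

// Declared as a variable rather than a plain func so tests can replace it.
var getClusterInfo = func() (*clusterInfo, error) {
	return nil, errors.New("real REST endpoint is unreachable in unit tests")
}

func main() {
	orig := getClusterInfo
	defer func() { getClusterInfo = orig }() // restore the real implementation

	// The override, analogous to the GetClusterInfoCall stub above.
	getClusterInfo = func() (*clusterInfo, error) {
		return &clusterInfo{MultiSite: "false"}, nil
	}

	info, err := getClusterInfo()
	fmt.Println(info.MultiSite, err) // false <nil>
}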
6 changes: 5 additions & 1 deletion pkg/splunk/enterprise/licensemanager_test.go
@@ -1075,7 +1075,11 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
Name: licensemanager.Name,
Namespace: licensemanager.Namespace,
}

err = c.Get(ctx, namespacedName, licensemanager)
if err != nil {
t.Errorf("Unexpected get license manager %v", err)
debug.PrintStack()
}
// simulate Ready state
licensemanager.Status.Phase = enterpriseApi.PhaseReady
licensemanager.Spec.ServiceTemplate.Annotations = map[string]string{
35 changes: 30 additions & 5 deletions pkg/splunk/enterprise/monitoringconsole_test.go
@@ -72,6 +72,7 @@ func TestApplyMonitoringConsole(t *testing.T) {
{MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"},
{MetaName: "*v4.MonitoringConsole-test-stack1"},
{MetaName: "*v4.MonitoringConsole-test-stack1"},
}
@@ -81,15 +82,19 @@ func TestApplyMonitoringConsole(t *testing.T) {
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Service-test-splunk-stack1-monitoring-console-headless"},
{MetaName: "*v1.Service-test-splunk-stack1-monitoring-console-service"},

{MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.ConfigMap-test-splunk-test-probe-configmap"},
{MetaName: "*v1.Secret-test-splunk-test-secret"},
{MetaName: "*v1.Secret-test-splunk-stack1-monitoring-console-secret-v1"},

{MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"},
{MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"},

{MetaName: "*v4.MonitoringConsole-test-stack1"},
{MetaName: "*v4.MonitoringConsole-test-stack1"},
}
@@ -875,7 +880,11 @@ func TestMonitoringConsoleWithReadyState(t *testing.T) {
Name: monitoringconsole.Name,
Namespace: monitoringconsole.Namespace,
}

err = c.Get(ctx, namespacedName, monitoringconsole)
if err != nil {
t.Errorf("Unexpected get monitoring console %v", err)
debug.PrintStack()
}
// simulate Ready state
monitoringconsole.Status.Phase = enterpriseApi.PhaseReady
monitoringconsole.Spec.ServiceTemplate.Annotations = map[string]string{
@@ -1133,6 +1142,14 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) {
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
namespacedName := types.NamespacedName{
Name: cm.Name,
Namespace: cm.Namespace,
}
err = client.Get(ctx, namespacedName, &cm)
if err != nil {
t.Errorf("isMonitoringConsoleReadyForUpgrade should not have returned error=%v", err)
}
cm.Status.Phase = enterpriseApi.PhaseReady
err = client.Status().Update(ctx, &cm)
if err != nil {
@@ -1171,9 +1188,9 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) {
_, err = ApplyClusterManager(ctx, client, &cm)

monitoringConsole := &enterpriseApi.MonitoringConsole{}
namespacedName := types.NamespacedName{
Name: cm.Name,
Namespace: cm.Namespace,
namespacedName = types.NamespacedName{
Name: mc.Name,
Namespace: mc.Namespace,
}
err = client.Get(ctx, namespacedName, monitoringConsole)
if err != nil {
Expand Down Expand Up @@ -1239,6 +1256,14 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) {
if err != nil {
t.Errorf("applyClusterManager should not have returned error; err=%v", err)
}
namespacedName := types.NamespacedName{
Name: cm.Name,
Namespace: cm.Namespace,
}
err = client.Get(ctx, namespacedName, cm)
if err != nil {
t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err)
}
cm.Status.Phase = enterpriseApi.PhaseReady
err = client.Status().Update(ctx, cm)
if err != nil {
@@ -1256,7 +1281,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) {
t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err)
}
monitoringConsole := &enterpriseApi.MonitoringConsole{}
namespacedName := types.NamespacedName{
namespacedName = types.NamespacedName{
Name: cm.Name,
Namespace: cm.Namespace,
}
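The duplicated {MetaName: ...} entries here and in indexercluster_test.go follow from how the mock client does its bookkeeping: every operation is appended to a per-method call list and later compared against wantCalls, so a code path that now fetches the same object twice needs a second identical entry. A toy version of that recording (illustrative only; spltest's real types differ):

package main

import "fmt"

// mockFuncCall mirrors the spirit of spltest.MockFuncCall.
type mockFuncCall struct{ MetaName string }

// recordingClient appends every operation it sees, keyed by method name.
type recordingClient struct{ calls map[string][]mockFuncCall }

func (c *recordingClient) get(metaName string) {
	c.calls["Get"] = append(c.calls["Get"], mockFuncCall{MetaName: metaName})
}

func main() {
	c := &recordingClient{calls: map[string][]mockFuncCall{}}

	// The fixed reconcile path reads the StatefulSet twice, so the
	// expectation list must hold two identical entries.
	c.get("*v1.StatefulSet-test-splunk-stack1-monitoring-console")
	c.get("*v1.StatefulSet-test-splunk-stack1-monitoring-console")

	fmt.Println(len(c.calls["Get"])) // 2
}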
1 change: 0 additions & 1 deletion pkg/splunk/enterprise/searchheadcluster.go
@@ -459,7 +459,6 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
return splctrl.UpdateStatefulSetPods(ctx, mgr.c, statefulSet, mgr, desiredReplicas)
}


// PrepareScaleDown for searchHeadClusterPodManager prepares search head pod to be removed via scale down event; it returns true when ready
func (mgr *searchHeadClusterPodManager) PrepareScaleDown(ctx context.Context, n int32) (bool, error) {
// start by quarantining the pod