diff --git a/pkg/sync/common/types.go b/pkg/sync/common/types.go
index eb851af83..7399cc78a 100644
--- a/pkg/sync/common/types.go
+++ b/pkg/sync/common/types.go
@@ -27,6 +27,8 @@ const (
 	SyncOptionPruneLast = "PruneLast=true"
 	// Sync option that enables use of replace or create command instead of apply
 	SyncOptionReplace = "Replace=true"
+	// Sync option that enables use of the --force flag, deleting and re-creating the resource
+	SyncOptionForce = "Force=true"
 	// Sync option that enables use of --server-side flag instead of client-side
 	SyncOptionServerSideApply = "ServerSideApply=true"
 	// Sync option that disables resource deletion
diff --git a/pkg/sync/sync_context.go b/pkg/sync/sync_context.go
index 3b43333f4..fb57b8273 100644
--- a/pkg/sync/sync_context.go
+++ b/pkg/sync/sync_context.go
@@ -459,8 +459,8 @@ func (sc *syncContext) Sync() {
 	// if pruned tasks pending deletion, then wait...
 	prunedTasksPendingDelete := tasks.Filter(func(t *syncTask) bool {
 		if t.pruned() && t.liveObj != nil {
-			return t.liveObj.GetDeletionTimestamp() != nil
-		}
+			return t.liveObj.GetDeletionTimestamp() != nil
+		}
 		return false
 	})
 	if prunedTasksPendingDelete.Len() > 0 {
@@ -761,31 +761,31 @@ func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
 	// for prune tasks, modify the waves for proper cleanup i.e reverse of sync wave (creation order)
 	pruneTasks := make(map[int][]*syncTask)
 	for _, task := range tasks {
-		if task.isPrune() {
-			pruneTasks[task.wave()] = append(pruneTasks[task.wave()], task)
-		}
+		if task.isPrune() {
+			pruneTasks[task.wave()] = append(pruneTasks[task.wave()], task)
+		}
 	}
-
+
 	var uniquePruneWaves []int
 	for k := range pruneTasks {
-		uniquePruneWaves = append(uniquePruneWaves, k)
+		uniquePruneWaves = append(uniquePruneWaves, k)
 	}
 	sort.Ints(uniquePruneWaves)
-
+
 	// reorder waves for pruning tasks using symmetric swap on prune waves
 	n := len(uniquePruneWaves)
 	for i := 0; i < n/2; i++ {
-		// waves to swap
-		startWave := uniquePruneWaves[i]
-		endWave := uniquePruneWaves[n-1-i]
-
-		for _, task := range pruneTasks[startWave] {
+		// waves to swap
+		startWave := uniquePruneWaves[i]
+		endWave := uniquePruneWaves[n-1-i]
+
+		for _, task := range pruneTasks[startWave] {
 			task.waveOverride = &endWave
-		}
-
-		for _, task := range pruneTasks[endWave] {
+		}
+
+		for _, task := range pruneTasks[endWave] {
 			task.waveOverride = &startWave
-		}
+		}
 	}
 
 	// for pruneLast tasks, modify the wave to sync phase last wave of tasks + 1
@@ -940,7 +940,7 @@ func (sc *syncContext) ensureCRDReady(name string) error {
 	})
 }
 
-func (sc *syncContext) applyObject(t *syncTask, dryRun, force, validate bool) (common.ResultCode, string) {
+func (sc *syncContext) applyObject(t *syncTask, dryRun, validate bool) (common.ResultCode, string) {
 	dryRunStrategy := cmdutil.DryRunNone
 	if dryRun {
 		// irrespective of the dry run mode set in the sync context, always run
@@ -954,6 +954,7 @@ func (sc *syncContext) applyObject(t *syncTask, dryRun, force, validate bool) (c
 	var err error
 	var message string
 	shouldReplace := sc.replace || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionReplace)
+	force := sc.force || resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionForce)
 	// if it is a dry run, disable server side apply, as the goal is to validate only the
 	// yaml correctness of the rendered manifests.
 	// running dry-run in server mode breaks the auto create namespace feature
@@ -1233,7 +1234,7 @@ func (sc *syncContext) processCreateTasks(state runState, tasks syncTasks, dryRu
 			logCtx := sc.log.WithValues("dryRun", dryRun, "task", t)
 			logCtx.V(1).Info("Applying")
 			validate := sc.validate && !resourceutil.HasAnnotationOption(t.targetObj, common.AnnotationSyncOptions, common.SyncOptionsDisableValidation)
-			result, message := sc.applyObject(t, dryRun, sc.force, validate)
+			result, message := sc.applyObject(t, dryRun, validate)
 			if result == common.ResultCodeSyncFailed {
 				logCtx.WithValues("message", message).Info("Apply failed")
 				state = failed
diff --git a/pkg/sync/sync_context_test.go b/pkg/sync/sync_context_test.go
index 2e2740609..7cc47be5d 100644
--- a/pkg/sync/sync_context_test.go
+++ b/pkg/sync/sync_context_test.go
@@ -840,6 +840,52 @@ func TestSync_ServerSideApply(t *testing.T) {
 	}
 }
 
+func withForceAnnotation(un *unstructured.Unstructured) *unstructured.Unstructured {
+	un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: synccommon.SyncOptionForce})
+	return un
+}
+
+func withForceAndReplaceAnnotations(un *unstructured.Unstructured) *unstructured.Unstructured {
+	un.SetAnnotations(map[string]string{synccommon.AnnotationSyncOptions: "Force=true,Replace=true"})
+	return un
+}
+
+func TestSync_Force(t *testing.T) {
+	testCases := []struct {
+		name        string
+		target      *unstructured.Unstructured
+		live        *unstructured.Unstructured
+		commandUsed string
+		force       bool
+	}{
+		{"NoAnnotation", NewPod(), NewPod(), "apply", false},
+		{"ForceApplyAnnotationIsSet", withForceAnnotation(NewPod()), NewPod(), "apply", true},
+		{"ForceReplaceAnnotationIsSet", withForceAndReplaceAnnotations(NewPod()), NewPod(), "replace", true},
+		{"LiveObjectMissing", withReplaceAnnotation(NewPod()), nil, "create", false},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			syncCtx := newTestSyncCtx(nil)
+
+			tc.target.SetNamespace(FakeArgoCDNamespace)
+			if tc.live != nil {
+				tc.live.SetNamespace(FakeArgoCDNamespace)
+			}
+			syncCtx.resources = groupResources(ReconciliationResult{
+				Live:   []*unstructured.Unstructured{tc.live},
+				Target: []*unstructured.Unstructured{tc.target},
+			})
+
+			syncCtx.Sync()
+
+			resourceOps, _ := syncCtx.resourceOps.(*kubetest.MockResourceOps)
+			assert.Equal(t, tc.commandUsed, resourceOps.GetLastResourceCommand(kube.GetResourceKey(tc.target)))
+			assert.Equal(t, tc.force, resourceOps.GetLastForce())
+		})
+	}
+}
+
 func TestSelectiveSyncOnly(t *testing.T) {
 	pod1 := NewPod()
 	pod1.SetName("pod-1")
@@ -1771,11 +1817,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
 			// no change in wave order
 			expectedWaveOrder: map[string]int{
 				// new wave // original wave
-				ns.GetName():   0, // 0
-				pod1.GetName(): 1, // 1
-				pod2.GetName(): 2, // 2
-				pod3.GetName(): 3, // 3
-				pod4.GetName(): 4, // 4
+				ns.GetName():   0, // 0
+				pod1.GetName(): 1, // 1
+				pod2.GetName(): 2, // 2
+				pod3.GetName(): 3, // 3
+				pod4.GetName(): 4, // 4
 			},
 		},
 		{
@@ -1785,11 +1831,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
 			// change in prune wave order
 			expectedWaveOrder: map[string]int{
 				// new wave // original wave
-				ns.GetName():   4, // 0
-				pod1.GetName(): 3, // 1
-				pod2.GetName(): 2, // 2
-				pod3.GetName(): 1, // 3
-				pod4.GetName(): 0, // 4
+				ns.GetName():   4, // 0
+				pod1.GetName(): 3, // 1
+				pod2.GetName(): 2, // 2
+				pod3.GetName(): 1, // 3
+				pod4.GetName(): 0, // 4
 			},
 		},
 		{
@@ -1799,13 +1845,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
 			// change in prune wave order
 			expectedWaveOrder: map[string]int{
 				// new wave // original wave
-				pod1.GetName(): 4, // 1
-				pod3.GetName(): 3, // 3
-				pod4.GetName(): 1, // 4
+				pod1.GetName(): 4, // 1
+				pod3.GetName(): 3, // 3
+				pod4.GetName(): 1, // 4
 
 				// no change since non prune tasks
-				ns.GetName():   0, // 0
-				pod2.GetName(): 2, // 2
+				ns.GetName():   0, // 0
+				pod2.GetName(): 2, // 2
 			},
 		},
 	}
@@ -1830,13 +1876,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
 			// change in prune wave order
 			expectedWaveOrder: map[string]int{
 				// new wave // original wave
-				pod1.GetName(): 5, // 1
-				pod2.GetName(): 5, // 2
-				pod3.GetName(): 5, // 3
-				pod4.GetName(): 5, // 4
+				pod1.GetName(): 5, // 1
+				pod2.GetName(): 5, // 2
+				pod3.GetName(): 5, // 3
+				pod4.GetName(): 5, // 4
 
 				// no change since non prune tasks
-				ns.GetName(): 0, // 0
+				ns.GetName(): 0, // 0
 			},
 		},
 		{
@@ -1847,13 +1893,13 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
 			// change in wave order
 			expectedWaveOrder: map[string]int{
 				// new wave // original wave
-				pod1.GetName(): 4, // 1
-				pod2.GetName(): 5, // 2
-				pod3.GetName(): 2, // 3
-				pod4.GetName(): 1, // 4
+				pod1.GetName(): 4, // 1
+				pod2.GetName(): 5, // 2
+				pod3.GetName(): 2, // 3
+				pod4.GetName(): 1, // 4
 
 				// no change since non prune tasks
-				ns.GetName(): 0, // 0
+				ns.GetName(): 0, // 0
 			},
 		},
 	}
@@ -1877,11 +1923,11 @@ func TestWaveReorderingOfPruneTasks(t *testing.T) {
 			// change in prune wave order
 			expectedWaveOrder: map[string]int{
 				// new wave // original wave
-				pod1.GetName(): 5, // 1
-				pod3.GetName(): 4, // 3
-				pod4.GetName(): 4, // 3
-				pod5.GetName(): 3, // 4
-				pod7.GetName(): 1, // 5
+				pod1.GetName(): 5, // 1
+				pod3.GetName(): 4, // 3
+				pod4.GetName(): 4, // 3
+				pod5.GetName(): 3, // 4
+				pod7.GetName(): 1, // 5
 
 				// no change since non prune tasks
 				ns.GetName(): -1, // -1
@@ -1941,8 +1987,8 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) {
 
 	// simulate successful delete of pod3
 	syncCtx.resources = groupResources(ReconciliationResult{
-		Target: []*unstructured.Unstructured{nil, nil, },
-		Live:   []*unstructured.Unstructured{pod1, pod2, },
+		Target: []*unstructured.Unstructured{nil, nil},
+		Live:   []*unstructured.Unstructured{pod1, pod2},
 	})
 
 	// next sync should prune only pod2
@@ -1966,8 +2012,8 @@ func TestWaitForCleanUpBeforeNextWave(t *testing.T) {
 
 	// simulate successful delete of pod2
 	syncCtx.resources = groupResources(ReconciliationResult{
-		Target: []*unstructured.Unstructured{nil, },
-		Live:   []*unstructured.Unstructured{pod1, },
+		Target: []*unstructured.Unstructured{nil},
+		Live:   []*unstructured.Unstructured{pod1},
 	})
 
 	// next sync should proceed with next wave
diff --git a/pkg/utils/kube/kubetest/mock_resource_operations.go b/pkg/utils/kube/kubetest/mock_resource_operations.go
index f56d460bf..c6e30f4e4 100644
--- a/pkg/utils/kube/kubetest/mock_resource_operations.go
+++ b/pkg/utils/kube/kubetest/mock_resource_operations.go
@@ -22,6 +22,7 @@ type MockResourceOps struct {
 	lastValidate           bool
 	serverSideApply        bool
 	serverSideApplyManager string
+	lastForce              bool
 
 	recordLock sync.RWMutex
 
@@ -73,6 +74,19 @@ func (r *MockResourceOps) SetLastServerSideApplyManager(manager string) {
 	r.recordLock.Unlock()
 }
 
+func (r *MockResourceOps) SetLastForce(force bool) {
+	r.recordLock.Lock()
+	r.lastForce = force
+	r.recordLock.Unlock()
+}
+
+func (r *MockResourceOps) GetLastForce() bool {
+	r.recordLock.RLock()
+	force := r.lastForce
+	r.recordLock.RUnlock()
+	return force
+}
+
 func (r *MockResourceOps) SetLastResourceCommand(key kube.ResourceKey, cmd string) {
 	r.recordLock.Lock()
 	if r.lastCommandPerResource == nil {
@@ -95,6 +109,7 @@ func (r *MockResourceOps) ApplyResource(ctx context.Context, obj *unstructured.U
 	r.SetLastValidate(validate)
 	r.SetLastServerSideApply(serverSideApply)
 	r.SetLastServerSideApplyManager(manager)
+	r.SetLastForce(force)
 	r.SetLastResourceCommand(kube.GetResourceKey(obj), "apply")
 	command, ok := r.Commands[obj.GetName()]
 	if !ok {
@@ -105,9 +120,9 @@ func (r *MockResourceOps) ApplyResource(ctx context.Context, obj *unstructured.U
 }
 
 func (r *MockResourceOps) ReplaceResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force bool) (string, error) {
+	r.SetLastForce(force)
 	command, ok := r.Commands[obj.GetName()]
 	r.SetLastResourceCommand(kube.GetResourceKey(obj), "replace")
-
 	if !ok {
 		return "", nil
 	}
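Not part of the patch: a minimal, self-contained sketch of how a consumer could opt a single resource into the new behaviour, mirroring the force derivation this diff adds to applyObject. The import paths for the sync packages are assumptions here; only the identifiers that appear in the diff itself (AnnotationSyncOptions, SyncOptionForce, HasAnnotationOption) are taken from it. As TestSync_Force above exercises, Force=true keeps the apply command but records force, while "Force=true,Replace=true" switches to replace.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	// Assumed import paths for the packages touched in this diff.
	synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
	resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
)

func main() {
	// Hypothetical target object; in a real sync this comes from the rendered manifests.
	pod := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
	}}

	// Opt this one resource into forced sync; "Force=true,Replace=true" would
	// additionally switch from apply to replace, as TestSync_Force shows.
	pod.SetAnnotations(map[string]string{
		synccommon.AnnotationSyncOptions: synccommon.SyncOptionForce,
	})

	// Mirrors the derivation added to applyObject: a context-wide force setting
	// (sc.force) OR the per-resource sync-options annotation.
	contextWideForce := false
	force := contextWideForce || resourceutil.HasAnnotationOption(pod, synccommon.AnnotationSyncOptions, synccommon.SyncOptionForce)

	fmt.Println("force:", force) // prints: force: true
}
```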