Commit

Merge branch 'master' into health-status-timestamp
blakepettersson authored Feb 24, 2024
2 parents e315264 + 5bc1850 commit 070cd58
Showing 64 changed files with 2,863 additions and 1,164 deletions.
22 changes: 13 additions & 9 deletions .github/workflows/ci-build.yaml
@@ -26,19 +26,23 @@ jobs:
changes:
runs-on: ubuntu-latest
outputs:
backend: ${{ steps.filter.outputs.backend }}
frontend: ${{ steps.filter.outputs.frontend }}
backend: ${{ steps.filter.outputs.backend_any_changed }}
frontend: ${{ steps.filter.outputs.frontend_any_changed }}
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2
- uses: tj-actions/changed-files@90a06d6ba9543371ab4df8eeca0be07ca6054959 # v42.0.2
id: filter
with:
filters: |
# Any file which is not under docs/, ui/ or is not a markdown file is counted as a backend file
files_yaml: |
backend:
- '!(ui/**)'
- '!(**/*.md)'
- '!ui/**'
- '!**.md'
- '!**/*.md'
- '!docs/**'
frontend:
- 'ui/**'
- Dockerfile
check-go:
name: Ensure Go modules synchronicity
if: ${{ needs.changes.outputs.backend == 'true' }}
@@ -55,7 +59,7 @@ jobs:
- name: Download all Go modules
run: |
go mod download
- name: Check for tidyness of go.mod and go.sum
- name: Check for tidiness of go.mod and go.sum
run: |
go mod tidy
git diff --exit-code -- .
@@ -302,7 +306,7 @@ jobs:
- name: Setup NodeJS
uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
with:
node-version: '20.7.0'
node-version: '21.6.1'
- name: Restore node dependency cache
id: cache-dependencies
uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2
@@ -466,7 +470,7 @@ jobs:
git config --global user.email "john.doe@example.com"
- name: Pull Docker image required for tests
run: |
docker pull ghcr.io/dexidp/dex:v2.37.0
docker pull ghcr.io/dexidp/dex:v2.38.0
docker pull argoproj/argo-cd-ci-builder:v1.0.0
docker pull redis:7.0.14-alpine
- name: Create target directory for binaries in the build-process
2 changes: 1 addition & 1 deletion Dockerfile
@@ -83,7 +83,7 @@ WORKDIR /home/argocd
####################################################################################################
# Argo CD UI stage
####################################################################################################
FROM --platform=$BUILDPLATFORM docker.io/library/node:20.6.1@sha256:14bd39208dbc0eb171cbfb26ccb9ac09fa1b2eba04ccd528ab5d12983fd9ee24 AS argocd-ui
FROM --platform=$BUILDPLATFORM docker.io/library/node:21.6.2@sha256:65998e325b06014d4f1417a8a6afb1540d1ac66521cca76f2221a6953947f9ee AS argocd-ui

WORKDIR /src
COPY ["ui/package.json", "ui/yarn.lock", "./"]
10 changes: 7 additions & 3 deletions USERS.md
@@ -44,14 +44,14 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Camptocamp](https://camptocamp.com)
1. [Candis](https://www.candis.io)
1. [Capital One](https://www.capitalone.com)
1. [CARFAX](https://www.carfax.com)
1. [CARFAX Europe](https://www.carfax.eu)
1. [CARFAX](https://www.carfax.com)
1. [Carrefour Group](https://www.carrefour.com)
1. [Casavo](https://casavo.com)
1. [Celonis](https://www.celonis.com/)
1. [CERN](https://home.cern/)
1. [Chargetrip](https://chargetrip.com)
1. [Chainnodes](https://chainnodes.org)
1. [Chargetrip](https://chargetrip.com)
1. [Chime](https://www.chime.com)
1. [Cisco ET&I](https://eti.cisco.com/)
1. [Cloud Posse](https://www.cloudposse.com/)
@@ -113,8 +113,8 @@ Currently, the following organizations are **officially** using Argo CD:
1. [GlueOps](https://glueops.dev)
1. [GMETRI](https://gmetri.com/)
1. [Gojek](https://www.gojek.io/)
1. [GoTo](https://www.goto.com/)
1. [GoTo Financial](https://gotofinancial.com/)
1. [GoTo](https://www.goto.com/)
1. [Greenpass](https://www.greenpass.com.br/)
1. [Gridfuse](https://gridfuse.com/)
1. [Groww](https://groww.in)
@@ -188,6 +188,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Natura &Co](https://naturaeco.com/)
1. [Nethopper](https://nethopper.io)
1. [New Relic](https://newrelic.com/)
1. [Nextbasket](https://nextbasket.com)
1. [Nextdoor](https://nextdoor.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Nitro](https://gonitro.com)
@@ -214,6 +215,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [PagerDuty](https://www.pagerduty.com/)
1. [Pandosearch](https://www.pandosearch.com/en/home)
1. [Patreon](https://www.patreon.com/)
1. [PayIt](https://payitgov.com/)
1. [PayPay](https://paypay.ne.jp/)
1. [Peloton Interactive](https://www.onepeloton.com/)
1. [Percona](https://percona.com/)
@@ -243,6 +245,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Reenigne Cloud](https://reenigne.ca)
1. [reev.com](https://www.reev.com/)
1. [RightRev](https://rightrev.com/)
1. [Rijkswaterstaat](https://www.rijkswaterstaat.nl/en)
1. [Rise](https://www.risecard.eu/)
1. [Riskified](https://www.riskified.com/)
1. [Robotinfra](https://www.robotinfra.com)
@@ -271,6 +274,7 @@ Currently, the following organizations are **officially** using Argo CD:
1. [Splunk](https://splunk.com/)
1. [Spores Labs](https://spores.app)
1. [Statsig](https://statsig.com)
1. [SternumIOT](https://sternumiot.com)
1. [StreamNative](https://streamnative.io)
1. [Stuart](https://stuart.com/)
1. [Sumo Logic](https://sumologic.com/)
@@ -10,8 +10,6 @@ import (
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
kubeerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"

@@ -26,7 +24,6 @@ import (
cacheutil "github.com/argoproj/argo-cd/v2/util/cache"
appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
"github.com/argoproj/argo-cd/v2/util/cli"
"github.com/argoproj/argo-cd/v2/util/db"
"github.com/argoproj/argo-cd/v2/util/env"
"github.com/argoproj/argo-cd/v2/util/errors"
kubeutil "github.com/argoproj/argo-cd/v2/util/kube"
@@ -147,7 +144,8 @@ func NewCommand() *cobra.Command {
appController.InvalidateProjectsCache()
}))
kubectl := kubeutil.NewKubectl()
clusterSharding := getClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
clusterSharding, err := sharding.GetClusterSharding(kubeClient, settingsMgr, shardingAlgorithm, enableDynamicClusterDistribution)
errors.CheckError(err)
appController, err = controller.NewApplicationController(
namespace,
settingsMgr,
@@ -170,6 +168,7 @@ func NewCommand() *cobra.Command {
applicationNamespaces,
&workqueueRateLimit,
serverSideDiff,
enableDynamicClusterDistribution,
)
errors.CheckError(err)
cacheutil.CollectMetrics(redisClient, appController.GetMetricsServer())
@@ -237,56 +236,3 @@ func NewCommand() *cobra.Command {
})
return &command
}

func getClusterSharding(kubeClient *kubernetes.Clientset, settingsMgr *settings.SettingsManager, shardingAlgorithm string, enableDynamicClusterDistribution bool) sharding.ClusterShardingCache {
var replicasCount int
// StatefulSet mode and Deployment mode uses different default values for shard number.
defaultShardNumberValue := 0
applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
appControllerDeployment, err := kubeClient.AppsV1().Deployments(settingsMgr.GetNamespace()).Get(context.Background(), applicationControllerName, metav1.GetOptions{})

// if the application controller deployment was not found, the Get() call returns an empty Deployment object. So, set the variable to nil explicitly
if err != nil && kubeerrors.IsNotFound(err) {
appControllerDeployment = nil
}

if enableDynamicClusterDistribution && appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
replicasCount = int(*appControllerDeployment.Spec.Replicas)
defaultShardNumberValue = -1
} else {
replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
}
shardNumber := env.ParseNumFromEnv(common.EnvControllerShard, defaultShardNumberValue, -math.MaxInt32, math.MaxInt32)
if replicasCount > 1 {
// check for shard mapping using configmap if application-controller is a deployment
// else use existing logic to infer shard from pod name if application-controller is a statefulset
if enableDynamicClusterDistribution && appControllerDeployment != nil {
var err error
// retry 3 times if we find a conflict while updating shard mapping configMap.
// If we still see conflicts after the retries, wait for next iteration of heartbeat process.
for i := 0; i <= common.AppControllerHeartbeatUpdateRetryCount; i++ {
shardNumber, err = sharding.GetOrUpdateShardFromConfigMap(kubeClient, settingsMgr, replicasCount, shardNumber)
if err != nil && !kubeerrors.IsConflict(err) {
err = fmt.Errorf("unable to get shard due to error updating the sharding config map: %s", err)
break
}
log.Warnf("conflict when getting shard from shard mapping configMap. Retrying (%d/3)", i)
}
errors.CheckError(err)
} else {
if shardNumber < 0 {
var err error
shardNumber, err = sharding.InferShard()
errors.CheckError(err)
}
if shardNumber > replicasCount {
log.Warnf("Calculated shard number %d is greated than the number of replicas count. Defaulting to 0", shardNumber)
shardNumber = 0
}
}
} else {
log.Info("Processing all cluster shards")
}
db := db.NewDB(settingsMgr.GetNamespace(), settingsMgr, kubeClient)
return sharding.NewClusterSharding(db, shardNumber, replicasCount, shardingAlgorithm)
}
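
Note on the sharding change above: the inline getClusterSharding helper is deleted and its logic moves behind the new sharding.GetClusterSharding call near the top of NewCommand, which now also returns an error checked with errors.CheckError. As a rough illustration of the shard-resolution rules the removed code applied (and which the extracted function presumably still enforces), here is a simplified, self-contained Go sketch; resolveShard, its inferred-shard fallback to 0, and the example values are assumptions for illustration only, not Argo CD's actual implementation:

package main

import (
	"fmt"
	"log"
)

// resolveShard illustrates, in simplified form, the rules in the removed
// getClusterSharding helper: with more than one replica, a negative shard is
// inferred (assumed 0 here; the real code derives it from the StatefulSet pod
// ordinal via sharding.InferShard), and a shard number larger than the replica
// count falls back to 0; with a single replica, all cluster shards are processed.
func resolveShard(configuredShard, replicas int) int {
	if replicas > 1 {
		if configuredShard < 0 {
			configuredShard = 0
		}
		if configuredShard > replicas {
			log.Printf("shard %d exceeds replica count %d; defaulting to 0", configuredShard, replicas)
			return 0
		}
		return configuredShard
	}
	log.Print("processing all cluster shards")
	return configuredShard
}

func main() {
	fmt.Println(resolveShard(-1, 3)) // 0 (inferred)
	fmt.Println(resolveShard(5, 3))  // 0 (clamped)
	fmt.Println(resolveShard(2, 1))  // 2 (single replica handles everything)
}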
34 changes: 33 additions & 1 deletion cmd/argocd/commands/app.go
@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
k8swatch "k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"

@@ -101,6 +102,7 @@ type watchOpts struct {
operation bool
suspended bool
degraded bool
delete bool
}

// NewApplicationCreateCommand returns a new instance of an `argocd app create` command
@@ -1116,6 +1118,7 @@ func NewApplicationDiffCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
defer argoio.Close(conn)
cluster, err := clusterIf.Get(ctx, &clusterpkg.ClusterQuery{Name: app.Spec.Destination.Name, Server: app.Spec.Destination.Server})
errors.CheckError(err)

diffOption.local = local
diffOption.localRepoRoot = localRepoRoot
diffOption.cluster = cluster
@@ -1276,6 +1279,7 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
noPrompt bool
propagationPolicy string
selector string
wait bool
)
var command = &cobra.Command{
Use: "delete APPNAME",
@@ -1299,7 +1303,8 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
c.HelpFunc()(c, args)
os.Exit(1)
}
conn, appIf := headless.NewClientOrDie(clientOpts, c).NewApplicationClientOrDie()
acdClient := headless.NewClientOrDie(clientOpts, c)
conn, appIf := acdClient.NewApplicationClientOrDie()
defer argoio.Close(conn)
var isTerminal bool = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
var isConfirmAll bool = false
@@ -1346,13 +1351,20 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
if lowercaseAnswer == "y" {
_, err := appIf.Delete(ctx, &appDeleteReq)
errors.CheckError(err)
if wait {
checkForDeleteEvent(ctx, acdClient, appFullName)
}
fmt.Printf("application '%s' deleted\n", appFullName)
} else {
fmt.Println("The command to delete '" + appFullName + "' was cancelled.")
}
} else {
_, err := appIf.Delete(ctx, &appDeleteReq)
errors.CheckError(err)

if wait {
checkForDeleteEvent(ctx, acdClient, appFullName)
}
}
}
},
@@ -1361,9 +1373,19 @@ func NewApplicationDeleteCommand(clientOpts *argocdclient.ClientOptions) *cobra.
command.Flags().StringVarP(&propagationPolicy, "propagation-policy", "p", "foreground", "Specify propagation policy for deletion of application's resources. One of: foreground|background")
command.Flags().BoolVarP(&noPrompt, "yes", "y", false, "Turn off prompting to confirm cascaded deletion of application resources")
command.Flags().StringVarP(&selector, "selector", "l", "", "Delete all apps with matching label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.")
command.Flags().BoolVar(&wait, "wait", false, "Wait until deletion of the application(s) completes")
return command
}

func checkForDeleteEvent(ctx context.Context, acdClient argocdclient.Client, appFullName string) {
appEventCh := acdClient.WatchApplicationWithRetry(ctx, appFullName, "")
for appEvent := range appEventCh {
if appEvent.Type == k8swatch.Deleted {
return
}
}
}

// Print simple list of application names
func printApplicationNames(apps []argoappv1.Application) {
for _, app := range apps {
@@ -1637,6 +1659,7 @@ func NewApplicationWaitCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
command.Flags().BoolVar(&watch.health, "health", false, "Wait for health")
command.Flags().BoolVar(&watch.suspended, "suspended", false, "Wait for suspended")
command.Flags().BoolVar(&watch.degraded, "degraded", false, "Wait for degraded")
command.Flags().BoolVar(&watch.delete, "delete", false, "Wait for delete")
command.Flags().StringVarP(&selector, "selector", "l", "", "Wait for apps by label. Supports '=', '==', '!=', in, notin, exists & not exists. Matching apps must satisfy all of the specified label constraints.")
command.Flags().StringArrayVar(&resources, "resource", []string{}, fmt.Sprintf("Sync only specific resources as GROUP%[1]sKIND%[1]sNAME or %[2]sGROUP%[1]sKIND%[1]sNAME. Fields may be blank and '*' can be used. This option may be specified repeatedly", resourceFieldDelimiter, resourceExcludeIndicator))
command.Flags().BoolVar(&watch.operation, "operation", false, "Wait for pending operations")
@@ -2131,6 +2154,9 @@ func groupResourceStates(app *argoappv1.Application, selectedResources []*argoap

// check if resource health, sync and operation statuses matches watch options
func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string, operationStatus *argoappv1.Operation) bool {
if watch.delete {
return false
}
healthCheckPassed := true

if watch.suspended && watch.health && watch.degraded {
@@ -2283,6 +2309,12 @@ func waitOnApplicationStatus(ctx context.Context, acdClient argocdclient.Client,

finalOperationState = app.Status.OperationState
operationInProgress := false

if watch.delete && appEvent.Type == k8swatch.Deleted {
fmt.Printf("Application '%s' deleted\n", app.QualifiedName())
return nil, nil, nil
}

// consider the operation is in progress
if app.Operation != nil {
// if it just got requested
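
Note on the delete-wait change above: the new wait flag on argocd app delete and the new delete option on argocd app wait both rely on the same pattern as checkForDeleteEvent, draining application watch events until one of type Deleted arrives (checkResourceStatus returns false while watch.delete is set, so the wait loop keeps running until that event). Below is a minimal, self-contained sketch of that pattern over a plain Kubernetes watch channel; waitForDeleted and the hand-built event channel are illustrative assumptions, since the real CLI streams ApplicationWatchEvent values from WatchApplicationWithRetry:

package main

import (
	"fmt"

	k8swatch "k8s.io/apimachinery/pkg/watch"
)

// waitForDeleted drains a watch event channel until a Deleted event arrives
// (returning true) or the channel is closed without one (returning false).
func waitForDeleted(events <-chan k8swatch.Event) bool {
	for ev := range events {
		if ev.Type == k8swatch.Deleted {
			return true
		}
	}
	return false
}

func main() {
	// Simulate a watch stream: one modification, then a deletion.
	events := make(chan k8swatch.Event, 2)
	events <- k8swatch.Event{Type: k8swatch.Modified}
	events <- k8swatch.Event{Type: k8swatch.Deleted}
	close(events)
	fmt.Println("deleted:", waitForDeleted(events))
}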