From 9d405cddbad37abca49deac0ecaf9c24462c06b7 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:36:34 +0530 Subject: [PATCH 01/25] [CI] Helm Chart Update argo-cd --- CHANGELOG.md | 7 ++++++ argocd-helm-charts/argo-cd/Chart.yaml | 2 +- .../argo-cd/charts/argo-cd/Chart.yaml | 6 ++--- .../argo-cd/charts/argo-cd/README.md | 25 +++++++++++++++++++ .../charts/argo-cd/templates/NOTES.txt | 4 +-- .../charts/argo-cd/templates/_helpers.tpl | 2 +- .../deployment.yaml | 14 ++++++++++- .../prometheusrule.yaml | 2 +- .../servicemonitor.yaml | 2 +- .../statefulset.yaml | 2 +- .../argocd-applicationset/servicemonitor.yaml | 2 +- .../argocd-configs/cluster-secrets.yaml | 2 +- .../repository-credentials-secret.yaml | 2 +- .../argocd-configs/repository-secret.yaml | 2 +- .../argocd-notifications/servicemonitor.yaml | 2 +- .../argocd-repo-server/servicemonitor.yaml | 2 +- .../argocd-server/servicemonitor.yaml | 2 +- .../argo-cd/templates/dex/servicemonitor.yaml | 2 +- .../templates/redis-secret-init/job.yaml | 2 +- .../templates/redis-secret-init/role.yaml | 2 +- .../redis-secret-init/rolebinding.yaml | 2 +- .../redis-secret-init/serviceaccount.yaml | 4 +-- .../templates/redis/servicemonitor.yaml | 2 +- argocd-helm-charts/argo-cd/requirements.lock | 6 ++--- 24 files changed, 72 insertions(+), 28 deletions(-) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..1b44c8a85 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,7 @@ +# Helm Charts Changelog + +All releases and the changes included in them (pulled from git commits added since last release) will be detailed in this file. 
+ + +## 20240731 +- Updated: argo-cd from version 7.3.4 to 7.3.11 diff --git a/argocd-helm-charts/argo-cd/Chart.yaml b/argocd-helm-charts/argo-cd/Chart.yaml index 8d31c0900..a02bc6a88 100644 --- a/argocd-helm-charts/argo-cd/Chart.yaml +++ b/argocd-helm-charts/argo-cd/Chart.yaml @@ -7,5 +7,5 @@ dependencies: # 2.3v argocd failes with remote value files # 3.35.4v helm chart fails also with bad path of the values files (this is the last release for 2.2.x argocd) # please stick to 3.29.5, why ? cause its a tested and the last working helm chart version - version: "7.3.4" + version: "7.3.11" repository: https://argoproj.github.io/argo-helm diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/Chart.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/Chart.yaml index 9c703ae6c..0734fc096 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/Chart.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/Chart.yaml @@ -1,12 +1,12 @@ annotations: artifacthub.io/changes: | - kind: changed - description: Bump argo-cd to v2.11.4 + description: Bump argo-cd to v2.11.7 artifacthub.io/signKey: | fingerprint: 2B8F22F57260EFA67BE1C5824B11F800CD9D2252 url: https://argoproj.github.io/argo-helm/pgp_keys.asc apiVersion: v2 -appVersion: v2.11.4 +appVersion: v2.11.7 dependencies: - condition: redis-ha.enabled name: redis-ha @@ -28,4 +28,4 @@ name: argo-cd sources: - https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd - https://github.com/argoproj/argo-cd -version: 7.3.4 +version: 7.3.11 diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/README.md b/argocd-helm-charts/argo-cd/charts/argo-cd/README.md index 3801650dd..c55c55776 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/README.md +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/README.md @@ -278,6 +278,31 @@ For full list of changes please check ArtifactHub [changelog]. Highlighted versions provide information about additional steps that should be performed by user when upgrading to newer version. 
+### 7.0.0 + +We changed the type of `.Values.configs.clusterCredentials` from `list` to `object`. +If you used the value, please migrate like below. + +```yaml +# before +configs: + clusterCredentials: + - mycluster: + server: https://mycluster.example.com + labels: {} + annotations: {} + # ... + +# after +configs: + clusterCredentials: + mycluster: + server: https://mycluster.example.com + labels: {} + annotations: {} + # ... +``` + ### 6.10.0 This version introduces authentication for Redis to mitigate GHSA-9766-5277-j5hr. diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/NOTES.txt b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/NOTES.txt index f2dbdfab3..8821ab754 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/NOTES.txt +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/NOTES.txt @@ -1,6 +1,6 @@ In order to access the server UI you have the following options: -1. kubectl port-forward service/{{ include "argo-cd.fullname" . }}-server -n {{ .Release.Namespace }} 8080:443 +1. kubectl port-forward service/{{ include "argo-cd.fullname" . }}-server -n {{ include "argo-cd.namespace" . }} 8080:443 and then open the browser on http://localhost:8080 and accept the certificate @@ -12,7 +12,7 @@ In order to access the server UI you have the following options: {{ if eq (toString (index .Values.configs.cm "admin.enabled")) "true" -}} After reaching the UI the first time you can login with username: admin and the random password generated during the installation. You can find the password by running: -kubectl -n {{ .Release.Namespace }} get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d +kubectl -n {{ include "argo-cd.namespace" . 
}} get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d (You should delete the initial secret afterwards as suggested by the Getting Started Guide: https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli) {{ else if or (index .Values.configs.cm "dex.config") (index .Values.configs.cm "oidc.config") -}} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/_helpers.tpl b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/_helpers.tpl index c609f19fb..eb3f7a2a7 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/_helpers.tpl +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/_helpers.tpl @@ -99,7 +99,7 @@ Create the name of the Redis secret-init service account to use */}} {{- define "argo-cd.redisSecretInit.serviceAccountName" -}} {{- if .Values.redisSecretInit.serviceAccount.create -}} - {{ default (include "argo-cd.redisSecretInit.fullname" .) .Values.redis.serviceAccount.name }} + {{ default (include "argo-cd.redisSecretInit.fullname" .) .Values.redisSecretInit.serviceAccount.name }} {{- else -}} {{ default "default" .Values.redisSecretInit.serviceAccount.name }} {{- end -}} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/deployment.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/deployment.yaml index 7c4a2a6da..3938c25d2 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/deployment.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/deployment.yaml @@ -208,10 +208,22 @@ spec: name: {{ default "argocd-redis" .Values.externalRedis.existingSecret }} {{- if .Values.externalRedis.host }} key: redis-password - optional: true {{- else }} key: auth {{- end }} + optional: true + - name: REDIS_SENTINEL_USERNAME + valueFrom: + secretKeyRef: + name: {{ default (include "argo-cd.redis.fullname" .) 
.Values.externalRedis.existingSecret }} + key: redis-sentinel-username + optional: true + - name: REDIS_SENTINEL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (include "argo-cd.redis.fullname" .) .Values.externalRedis.existingSecret }} + key: redis-sentinel-password + optional: true - name: ARGOCD_DEFAULT_CACHE_EXPIRATION valueFrom: configMapKeyRef: diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/prometheusrule.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/prometheusrule.yaml index 20d8eea8d..6ddc7f4c4 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/prometheusrule.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/prometheusrule.yaml @@ -3,7 +3,7 @@ apiVersion: monitoring.coreos.com/v1 kind: PrometheusRule metadata: name: {{ template "argo-cd.controller.fullname" . }} - namespace: {{ default .Release.Namespace .Values.controller.metrics.rules.namespace | quote }} + namespace: {{ default (include "argo-cd.namespace" .) .Values.controller.metrics.rules.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . "component" .Values.controller.name "name" .Values.controller.name) | nindent 4 }} {{- if .Values.controller.metrics.rules.selector }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/servicemonitor.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/servicemonitor.yaml index 4a09daf34..a9edaf545 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/servicemonitor.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/servicemonitor.yaml @@ -3,7 +3,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ template "argo-cd.controller.fullname" . 
}} - namespace: {{ default .Release.Namespace .Values.controller.metrics.serviceMonitor.namespace | quote }} + namespace: {{ default (include "argo-cd.namespace" .) .Values.controller.metrics.serviceMonitor.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . "component" .Values.controller.name "name" .Values.controller.name) | nindent 4 }} {{- with .Values.controller.metrics.serviceMonitor.selector }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/statefulset.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/statefulset.yaml index 8231f3118..3938a7592 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/statefulset.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-application-controller/statefulset.yaml @@ -205,12 +205,12 @@ spec: valueFrom: secretKeyRef: name: {{ default "argocd-redis" .Values.externalRedis.existingSecret }} - optional: true {{- if .Values.externalRedis.host }} key: redis-password {{- else }} key: auth {{- end }} + optional: true - name: REDIS_SENTINEL_USERNAME valueFrom: secretKeyRef: diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-applicationset/servicemonitor.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-applicationset/servicemonitor.yaml index 9c236c453..8fac6a8da 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-applicationset/servicemonitor.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-applicationset/servicemonitor.yaml @@ -3,7 +3,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ template "argo-cd.applicationSet.fullname" . }} - namespace: {{ default .Release.Namespace .Values.applicationSet.metrics.serviceMonitor.namespace | quote }} + namespace: {{ default (include "argo-cd.namespace" .) 
.Values.applicationSet.metrics.serviceMonitor.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . "component" .Values.applicationSet.name "name" .Values.applicationSet.name) | nindent 4 }} {{- with .Values.applicationSet.metrics.serviceMonitor.selector }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/cluster-secrets.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/cluster-secrets.yaml index d952f7c9f..0b4b1e113 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/cluster-secrets.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/cluster-secrets.yaml @@ -4,7 +4,7 @@ apiVersion: v1 kind: Secret metadata: name: {{ include "argo-cd.name" $ }}-cluster-{{ $cluster_key }} - namespace: {{ $.Release.Namespace | quote }} + namespace: {{ include "argo-cd.namespace" $ | quote }} labels: {{- include "argo-cd.labels" (dict "context" $) | nindent 4 }} {{- with $cluster_value.labels }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-credentials-secret.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-credentials-secret.yaml index ed1d2fd76..e4d23f9a5 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-credentials-secret.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-credentials-secret.yaml @@ -4,7 +4,7 @@ apiVersion: v1 kind: Secret metadata: name: argocd-repo-creds-{{ $repo_cred_key }} - namespace: {{ $.Release.Namespace | quote }} + namespace: {{ include "argo-cd.namespace" $ | quote }} labels: argocd.argoproj.io/secret-type: repo-creds {{- include "argo-cd.labels" (dict "context" $) | nindent 4 }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-secret.yaml 
b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-secret.yaml index 4c0289585..4a77cf1bd 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-secret.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-configs/repository-secret.yaml @@ -4,7 +4,7 @@ apiVersion: v1 kind: Secret metadata: name: argocd-repo-{{ $repo_key }} - namespace: {{ $.Release.Namespace | quote }} + namespace: {{ include "argo-cd.namespace" $ | quote }} labels: argocd.argoproj.io/secret-type: repository {{- include "argo-cd.labels" (dict "context" $) | nindent 4 }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-notifications/servicemonitor.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-notifications/servicemonitor.yaml index 03599a6d9..0d2c704f2 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-notifications/servicemonitor.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-notifications/servicemonitor.yaml @@ -3,7 +3,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ template "argo-cd.notifications.fullname" . }} - namespace: {{ default .Release.Namespace .Values.notifications.metrics.serviceMonitor.namespace | quote }} + namespace: {{ default (include "argo-cd.namespace" .) .Values.notifications.metrics.serviceMonitor.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . 
"component" .Values.notifications.name "name" .Values.notifications.name) | nindent 4 }} {{- with .Values.notifications.metrics.serviceMonitor.selector }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-repo-server/servicemonitor.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-repo-server/servicemonitor.yaml index 38041de34..709953987 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-repo-server/servicemonitor.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-repo-server/servicemonitor.yaml @@ -3,7 +3,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ template "argo-cd.repoServer.fullname" . }} - namespace: {{ default .Release.Namespace .Values.repoServer.metrics.serviceMonitor.namespace | default }} + namespace: {{ default (include "argo-cd.namespace" .) .Values.repoServer.metrics.serviceMonitor.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . "component" .Values.repoServer.name "name" .Values.repoServer.name) | nindent 4 }} {{- with .Values.repoServer.metrics.serviceMonitor.selector }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-server/servicemonitor.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-server/servicemonitor.yaml index 2f96ca0ac..74902ef04 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-server/servicemonitor.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/argocd-server/servicemonitor.yaml @@ -3,7 +3,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ template "argo-cd.server.fullname" . }} - namespace: {{ default .Release.Namespace .Values.server.metrics.serviceMonitor.namespace | quote }} + namespace: {{ default (include "argo-cd.namespace" .) .Values.server.metrics.serviceMonitor.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . 
"component" .Values.server.name "name" .Values.server.name) | nindent 4 }} {{- with .Values.server.metrics.serviceMonitor.selector }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/dex/servicemonitor.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/dex/servicemonitor.yaml index 3faa667a0..2c03e5970 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/dex/servicemonitor.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/dex/servicemonitor.yaml @@ -3,7 +3,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ template "argo-cd.dex.fullname" . }} - namespace: {{ default .Release.Namespace .Values.dex.metrics.serviceMonitor.namespace | quote }} + namespace: {{ default (include "argo-cd.namespace" .) .Values.dex.metrics.serviceMonitor.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . "component" .Values.dex.name "name" .Values.dex.name) | nindent 4 }} {{- with .Values.dex.metrics.serviceMonitor.selector }} diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/job.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/job.yaml index 27837465a..680862351 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/job.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/job.yaml @@ -3,7 +3,7 @@ apiVersion: batch/v1 kind: Job metadata: name: {{ include "argo-cd.redisSecretInit.fullname" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ include "argo-cd.namespace" . 
| quote }} annotations: "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-delete-policy": before-hook-creation diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/role.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/role.yaml index ac5fd3134..9e8259f97 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/role.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/role.yaml @@ -8,7 +8,7 @@ metadata: labels: {{- include "argo-cd.labels" (dict "context" . "component" .Values.redisSecretInit.name "name" .Values.redisSecretInit.name) | nindent 4 }} name: {{ include "argo-cd.redisSecretInit.fullname" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ include "argo-cd.namespace" . | quote }} rules: - apiGroups: - "" diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/rolebinding.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/rolebinding.yaml index 7ea1de961..a199628a9 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/rolebinding.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/rolebinding.yaml @@ -8,7 +8,7 @@ metadata: labels: {{- include "argo-cd.labels" (dict "context" . "component" .Values.redisSecretInit.name "name" .Values.redisSecretInit.name) | nindent 4 }} name: {{ include "argo-cd.redisSecretInit.fullname" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ include "argo-cd.namespace" . 
| quote }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/serviceaccount.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/serviceaccount.yaml index d6b95f138..85540d262 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/serviceaccount.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis-secret-init/serviceaccount.yaml @@ -1,10 +1,10 @@ -{{- if and .Values.redisSecretInit.enabled (not .Values.externalRedis.host) }} +{{- if and .Values.redisSecretInit.enabled .Values.redisSecretInit.serviceAccount.create (not .Values.externalRedis.host) }} apiVersion: v1 kind: ServiceAccount automountServiceAccountToken: {{ .Values.redisSecretInit.serviceAccount.automountServiceAccountToken }} metadata: name: {{ include "argo-cd.redisSecretInit.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ include "argo-cd.namespace" . | quote }} annotations: "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-delete-policy": before-hook-creation diff --git a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis/servicemonitor.yaml b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis/servicemonitor.yaml index 2126bafbf..4132c1ce0 100644 --- a/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis/servicemonitor.yaml +++ b/argocd-helm-charts/argo-cd/charts/argo-cd/templates/redis/servicemonitor.yaml @@ -4,7 +4,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ template "argo-cd.redis.fullname" . }} - namespace: {{ default .Release.Namespace .Values.redis.metrics.serviceMonitor.namespace | quote }} + namespace: {{ default (include "argo-cd.namespace" .) .Values.redis.metrics.serviceMonitor.namespace | quote }} labels: {{- include "argo-cd.labels" (dict "context" . 
"component" .Values.redis.name "name" .Values.redis.name) | nindent 4 }} {{- with .Values.redis.metrics.serviceMonitor.selector }} diff --git a/argocd-helm-charts/argo-cd/requirements.lock b/argocd-helm-charts/argo-cd/requirements.lock index e2fc5032a..396bf2f56 100644 --- a/argocd-helm-charts/argo-cd/requirements.lock +++ b/argocd-helm-charts/argo-cd/requirements.lock @@ -1,6 +1,6 @@ dependencies: - name: argo-cd repository: https://argoproj.github.io/argo-helm - version: 7.3.4 -digest: sha256:233414ae5809fc82127c2fb3d105a91ea0c6dae54985ce9581e8bf913a4ea925 -generated: "2024-07-09T02:54:19.015770931+05:30" + version: 7.3.11 +digest: sha256:50201611354d2274d3685d1fd6ac9391cbbe3853626a68d232ddb89fee1e2610 +generated: "2024-07-31T20:36:24.494739235+05:30" From 15112fd86790fbae6f0cd84e4a6e37f398b9db78 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:37:14 +0530 Subject: [PATCH 02/25] [CI] Helm Chart Update aws-ebs-csi-driver --- CHANGELOG.md | 1 + argocd-helm-charts/aws-ebs-csi-driver/Chart.lock | 6 +++--- argocd-helm-charts/aws-ebs-csi-driver/Chart.yaml | 2 +- .../charts/aws-ebs-csi-driver/CHANGELOG.md | 6 ++++++ .../charts/aws-ebs-csi-driver/Chart.yaml | 4 ++-- .../charts/aws-ebs-csi-driver/templates/_node.tpl | 2 +- .../aws-ebs-csi-driver/templates/controller.yaml | 8 ++++---- .../charts/aws-ebs-csi-driver/values.yaml | 14 +++++++------- 8 files changed, 25 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b44c8a85..5b023e98f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,3 +5,4 @@ All releases and the changes included in them (pulled from git commits added sin ## 20240731 - Updated: argo-cd from version 7.3.4 to 7.3.11 +- Updated: aws-ebs-csi-driver from version 2.32.0 to 2.33.0 diff --git a/argocd-helm-charts/aws-ebs-csi-driver/Chart.lock b/argocd-helm-charts/aws-ebs-csi-driver/Chart.lock index aeb8d0a31..7c1d5e4aa 100644 --- a/argocd-helm-charts/aws-ebs-csi-driver/Chart.lock +++ 
b/argocd-helm-charts/aws-ebs-csi-driver/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: aws-ebs-csi-driver repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver - version: 2.32.0 -digest: sha256:2f8dec72efd33661312c3fbc1f6d8a1f65e529991d95c1ae194a281ad55fd270 -generated: "2024-07-09T02:53:15.918149377+05:30" + version: 2.33.0 +digest: sha256:6993db6d49a6f2e79f067f49c96375e650a343fa61732eb8c29226d2fbce2dec +generated: "2024-07-31T20:37:04.632039601+05:30" diff --git a/argocd-helm-charts/aws-ebs-csi-driver/Chart.yaml b/argocd-helm-charts/aws-ebs-csi-driver/Chart.yaml index b61fece80..5639d8b48 100644 --- a/argocd-helm-charts/aws-ebs-csi-driver/Chart.yaml +++ b/argocd-helm-charts/aws-ebs-csi-driver/Chart.yaml @@ -3,5 +3,5 @@ name: aws-ebs-csi-driver version: 1.0.0 dependencies: - name: aws-ebs-csi-driver - version: 2.32.0 + version: 2.33.0 repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver diff --git a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/CHANGELOG.md b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/CHANGELOG.md index b87ed65f4..e4816d6d6 100644 --- a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/CHANGELOG.md +++ b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/CHANGELOG.md @@ -1,4 +1,10 @@ # Helm chart +## v2.33.0 +* Bump driver version to `v1.33.0` +* Bump CSI sidecar container versions +* Add fix for enableLinux node parameter ([#2078](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2078), [@ElijahQuinones](https://github.com/ElijahQuinones)) +* Fix dnsConfig indentation in controller template file ([#2084](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2084), [@cHiv0rz](https://github.com/cHiv0rz)) + ## v2.32.0 * Bump driver version to `v1.32.0` * Bump CSI sidecar container versions diff --git a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/Chart.yaml 
b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/Chart.yaml index f22a10f72..277ddfda5 100644 --- a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/Chart.yaml +++ b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.32.0 +appVersion: 1.33.0 description: A Helm chart for AWS EBS CSI Driver home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver keywords: @@ -13,4 +13,4 @@ maintainers: name: aws-ebs-csi-driver sources: - https://github.com/kubernetes-sigs/aws-ebs-csi-driver -version: 2.32.0 +version: 2.33.0 diff --git a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/_node.tpl b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/_node.tpl index 383e2a21f..c0c481433 100644 --- a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/_node.tpl +++ b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/_node.tpl @@ -1,5 +1,5 @@ {{- define "node" }} -{{- if or (eq (default true .Values.node.enableLinux) true) }} +{{- if .Values.node.enableLinux }} --- kind: DaemonSet apiVersion: apps/v1 diff --git a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/controller.yaml b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/controller.yaml index 6b393f401..53c242e60 100644 --- a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/controller.yaml +++ b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/templates/controller.yaml @@ -513,7 +513,7 @@ spec: {{- with .Values.controller.volumes }} {{- toYaml . 
| nindent 8 }} {{- end }} - {{- if .Values.controller.dnsConfig }} - dnsConfig: - {{- toYaml .Values.controller.dnsConfig | nindent 4 }} - {{- end }} + {{- if .Values.controller.dnsConfig }} + dnsConfig: + {{- toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} diff --git a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/values.yaml b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/values.yaml index 6b7216ff1..75a445742 100644 --- a/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/values.yaml +++ b/argocd-helm-charts/aws-ebs-csi-driver/charts/aws-ebs-csi-driver/values.yaml @@ -17,7 +17,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner - tag: "v5.0.1-eks-1-30-8" + tag: "v5.0.1-eks-1-30-10" logLevel: 2 # Additional parameters provided by external-provisioner. additionalArgs: [] @@ -44,7 +44,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher - tag: "v4.6.1-eks-1-30-8" + tag: "v4.6.1-eks-1-30-10" # Tune leader lease election for csi-attacher. # Leader election is on by default. leaderElection: @@ -73,7 +73,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter - tag: "v8.0.1-eks-1-30-8" + tag: "v8.0.1-eks-1-30-10" logLevel: 2 # Additional parameters provided by csi-snapshotter. additionalArgs: [] @@ -89,7 +89,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe - tag: "v2.13.0-eks-1-30-8" + tag: "v2.13.0-eks-1-30-10" # Additional parameters provided by livenessprobe. additionalArgs: [] resources: {} @@ -101,7 +101,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/external-resizer - tag: "v1.11.1-eks-1-30-8" + tag: "v1.11.1-eks-1-30-10" # Tune leader lease election for csi-resizer. 
# Leader election is on by default. leaderElection: @@ -128,7 +128,7 @@ sidecars: image: pullPolicy: IfNotPresent repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar - tag: "v2.11.0-eks-1-30-8" + tag: "v2.11.0-eks-1-30-10" logLevel: 2 # Additional parameters provided by node-driver-registrar. additionalArgs: [] @@ -479,4 +479,4 @@ useOldCSIDriver: false helmTester: enabled: true # Supply a custom image to the ebs-csi-driver-test pod in helm-tester.yaml - image: "gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240611-597c402033-master" + image: "gcr.io/k8s-staging-test-infra/kubekins-e2e:v20240705-131cd74733-master" From 73923ce759aeb134592fac66f0145cefd9f068dc Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:37:48 +0530 Subject: [PATCH 03/25] [CI] Helm Chart Update aws-efs-csi-driver --- CHANGELOG.md | 1 + argocd-helm-charts/aws-efs-csi-driver/Chart.lock | 6 +++--- argocd-helm-charts/aws-efs-csi-driver/Chart.yaml | 2 +- .../charts/aws-efs-csi-driver/CHANGELOG.md | 2 ++ .../aws-efs-csi-driver/charts/aws-efs-csi-driver/Chart.yaml | 4 ++-- .../charts/aws-efs-csi-driver/templates/node-daemonset.yaml | 3 +++ .../charts/aws-efs-csi-driver/values.yaml | 3 ++- 7 files changed, 14 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b023e98f..88bf8af82 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,3 +6,4 @@ All releases and the changes included in them (pulled from git commits added sin ## 20240731 - Updated: argo-cd from version 7.3.4 to 7.3.11 - Updated: aws-ebs-csi-driver from version 2.32.0 to 2.33.0 +- Updated: aws-efs-csi-driver from version 3.0.6 to 3.0.7 diff --git a/argocd-helm-charts/aws-efs-csi-driver/Chart.lock b/argocd-helm-charts/aws-efs-csi-driver/Chart.lock index 35f0f6fbc..bc6676f4d 100644 --- a/argocd-helm-charts/aws-efs-csi-driver/Chart.lock +++ b/argocd-helm-charts/aws-efs-csi-driver/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: aws-efs-csi-driver repository: 
https://kubernetes-sigs.github.io/aws-efs-csi-driver/ - version: 3.0.6 -digest: sha256:599497334339aae8f6315f1eab38291db826e8cc5f019319fef4c45e636f32bf -generated: "2024-07-09T02:53:15.33716977+05:30" + version: 3.0.7 +digest: sha256:e51d0bbde7f7001e118310524bcdfcd2f4f2575e33765fd34041965b52cf805d +generated: "2024-07-31T20:37:38.376851283+05:30" diff --git a/argocd-helm-charts/aws-efs-csi-driver/Chart.yaml b/argocd-helm-charts/aws-efs-csi-driver/Chart.yaml index a69bb55f7..5af8b2977 100644 --- a/argocd-helm-charts/aws-efs-csi-driver/Chart.yaml +++ b/argocd-helm-charts/aws-efs-csi-driver/Chart.yaml @@ -3,5 +3,5 @@ name: aws-efs-csi-driver version: 1.0.0 dependencies: - name: aws-efs-csi-driver - version: 3.0.6 + version: 3.0.7 repository: https://kubernetes-sigs.github.io/aws-efs-csi-driver/ diff --git a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/CHANGELOG.md b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/CHANGELOG.md index a0a7190dc..94dad74f8 100644 --- a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/CHANGELOG.md +++ b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/CHANGELOG.md @@ -1,4 +1,6 @@ # Helm chart +# v3.0.7 +* Bump app/driver version to `v2.0.6` # v3.0.6 * Bump app/driver version to `v2.0.5` # v3.0.5 diff --git a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/Chart.yaml b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/Chart.yaml index d6a13911f..d4122f390 100644 --- a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/Chart.yaml +++ b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 2.0.5 +appVersion: 2.0.6 description: A Helm chart for AWS EFS CSI Driver home: https://github.com/kubernetes-sigs/aws-efs-csi-driver keywords: @@ -15,4 +15,4 @@ maintainers: name: aws-efs-csi-driver sources: - https://github.com/kubernetes-sigs/aws-efs-csi-driver -version: 3.0.6 
+version: 3.0.7 diff --git a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/templates/node-daemonset.yaml b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/templates/node-daemonset.yaml index c472b4a92..4fa44b9cd 100644 --- a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/templates/node-daemonset.yaml +++ b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/templates/node-daemonset.yaml @@ -5,6 +5,9 @@ metadata: name: efs-csi-node labels: app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} + {{- with .Values.node.additionalLabels }} + {{ toYaml . | nindent 4 }} + {{- end }} spec: selector: matchLabels: diff --git a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/values.yaml b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/values.yaml index 84b48288c..27e35f44e 100644 --- a/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/values.yaml +++ b/argocd-helm-charts/aws-efs-csi-driver/charts/aws-efs-csi-driver/values.yaml @@ -11,7 +11,7 @@ useFIPS: false image: repository: public.ecr.aws/efs-csi-driver/amazon/aws-efs-csi-driver - tag: "v2.0.5" + tag: "v2.0.6" pullPolicy: IfNotPresent sidecars: @@ -144,6 +144,7 @@ node: # - 169.254.169.253 podLabels: {} podAnnotations: {} + additionalLabels: {} resources: {} # limits: From b37f0d1783754369d55bb409e1b34c2432ad193b Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:38:46 +0530 Subject: [PATCH 04/25] [CI] Helm Chart Update cert-manager --- CHANGELOG.md | 1 + argocd-helm-charts/cert-manager/Chart.lock | 6 +++--- argocd-helm-charts/cert-manager/Chart.yaml | 2 +- .../cert-manager/charts/cert-manager/Chart.yaml | 4 ++-- .../cert-manager/charts/cert-manager/README.md | 6 +++--- .../charts/cert-manager/templates/podmonitor.yaml | 4 ++-- 6 files changed, 12 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88bf8af82..e401755d9 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -7,3 +7,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: argo-cd from version 7.3.4 to 7.3.11 - Updated: aws-ebs-csi-driver from version 2.32.0 to 2.33.0 - Updated: aws-efs-csi-driver from version 3.0.6 to 3.0.7 +- Updated: cert-manager from version v1.15.1 to v1.15.2 diff --git a/argocd-helm-charts/cert-manager/Chart.lock b/argocd-helm-charts/cert-manager/Chart.lock index e58d02ccd..00776db73 100644 --- a/argocd-helm-charts/cert-manager/Chart.lock +++ b/argocd-helm-charts/cert-manager/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: cert-manager repository: https://charts.jetstack.io - version: v1.15.1 -digest: sha256:3442d01a22bb1e1b3eb8d00ffd462d1c31d17f49992c6c2a3dc2c159ffcbfd2f -generated: "2024-07-09T02:53:15.125672684+05:30" + version: v1.15.2 +digest: sha256:62cc40cb6fa9c0a3c7af092ab5dcf386ab8f3f312ae4a07d6eb85281639987e0 +generated: "2024-07-31T20:38:28.263777746+05:30" diff --git a/argocd-helm-charts/cert-manager/Chart.yaml b/argocd-helm-charts/cert-manager/Chart.yaml index cd09c8b9c..316f4dbe1 100644 --- a/argocd-helm-charts/cert-manager/Chart.yaml +++ b/argocd-helm-charts/cert-manager/Chart.yaml @@ -4,5 +4,5 @@ version: 1.6.1 # see latest chart here: https://artifacthub.io/packages/search?org=cert-manager dependencies: - name: cert-manager - version: v1.15.1 + version: v1.15.2 repository: https://charts.jetstack.io diff --git a/argocd-helm-charts/cert-manager/charts/cert-manager/Chart.yaml b/argocd-helm-charts/cert-manager/charts/cert-manager/Chart.yaml index 1402a651f..16ff62f93 100644 --- a/argocd-helm-charts/cert-manager/charts/cert-manager/Chart.yaml +++ b/argocd-helm-charts/cert-manager/charts/cert-manager/Chart.yaml @@ -6,7 +6,7 @@ annotations: fingerprint: 1020CF3C033D4F35BAE1C19E1226061C665DF13E url: https://cert-manager.io/public-keys/cert-manager-keyring-2021-09-20-1020CF3C033D4F35BAE1C19E1226061C665DF13E.gpg apiVersion: v2 -appVersion: v1.15.1 +appVersion: v1.15.2 description: 
A Helm chart for cert-manager home: https://cert-manager.io icon: https://raw.githubusercontent.com/cert-manager/community/4d35a69437d21b76322157e6284be4cd64e6d2b7/logo/logo-small.png @@ -23,4 +23,4 @@ maintainers: name: cert-manager sources: - https://github.com/cert-manager/cert-manager -version: v1.15.1 +version: v1.15.2 diff --git a/argocd-helm-charts/cert-manager/charts/cert-manager/README.md b/argocd-helm-charts/cert-manager/charts/cert-manager/README.md index c5a560726..7672ee97b 100644 --- a/argocd-helm-charts/cert-manager/charts/cert-manager/README.md +++ b/argocd-helm-charts/cert-manager/charts/cert-manager/README.md @@ -19,7 +19,7 @@ Before installing the chart, you must first install the cert-manager CustomResou This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources. ```bash -$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.crds.yaml +$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.2/cert-manager.crds.yaml ``` To install the chart with the release name `cert-manager`: @@ -29,7 +29,7 @@ To install the chart with the release name `cert-manager`: $ helm repo add jetstack https://charts.jetstack.io --force-update ## Install the cert-manager helm chart -$ helm install cert-manager --namespace cert-manager --version v1.15.1 jetstack/cert-manager +$ helm install cert-manager --namespace cert-manager --version v1.15.2 jetstack/cert-manager ``` In order to begin issuing certificates, you will need to set up a ClusterIssuer @@ -65,7 +65,7 @@ If you want to completely uninstall cert-manager from your cluster, you will als delete the previously installed CustomResourceDefinition resources: ```console -$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.crds.yaml +$ kubectl delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.15.2/cert-manager.crds.yaml ``` ## Configuration diff --git a/argocd-helm-charts/cert-manager/charts/cert-manager/templates/podmonitor.yaml b/argocd-helm-charts/cert-manager/charts/cert-manager/templates/podmonitor.yaml index 1adc0609c..175460ebe 100644 --- a/argocd-helm-charts/cert-manager/charts/cert-manager/templates/podmonitor.yaml +++ b/argocd-helm-charts/cert-manager/charts/cert-manager/templates/podmonitor.yaml @@ -44,7 +44,7 @@ spec: interval: {{ .Values.prometheus.podmonitor.interval }} scrapeTimeout: {{ .Values.prometheus.podmonitor.scrapeTimeout }} honorLabels: {{ .Values.prometheus.podmonitor.honorLabels }} - {{- with .Values.prometheus.servicemonitor.endpointAdditionalProperties }} - {{- toYaml . | nindent 4 }} + {{- with .Values.prometheus.podmonitor.endpointAdditionalProperties }} + {{- toYaml . | nindent 6 }} {{- end }} {{- end }} From f007935a8dc177b1861c586765a91b62e2179015 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:39:29 +0530 Subject: [PATCH 05/25] [CI] Helm Chart Update cilium --- CHANGELOG.md | 1 + argocd-helm-charts/cilium/Chart.lock | 6 +- argocd-helm-charts/cilium/Chart.yaml | 2 +- .../cilium/charts/cilium/Chart.yaml | 6 +- .../cilium/charts/cilium/README.md | 196 +- .../dashboards/cilium-dashboard.json | 14 +- .../configmap/bootstrap-config.json | 71 +- .../dashboards/cilium-operator-dashboard.json | 60 +- .../hubble/dashboards/hubble-dashboard.json | 193 +- .../dashboards/hubble-dns-namespace.json | 2 +- .../hubble-l7-http-metrics-by-workload.json | 11 +- .../hubble-network-overview-namespace.json | 2 +- .../charts/cilium/templates/_extensions.tpl | 50 + .../charts/cilium/templates/_helpers.tpl | 115 +- .../templates/cilium-agent/clusterrole.yaml | 13 +- .../cilium-agent/clusterrolebinding.yaml | 2 +- .../templates/cilium-agent/daemonset.yaml | 166 +- .../templates/cilium-agent/service.yaml | 11 +- .../cilium/templates/cilium-configmap.yaml | 133 
+- .../templates/cilium-envoy/configmap.yaml | 3 +- .../templates/cilium-envoy/daemonset.yaml | 46 +- .../templates/cilium-envoy/service.yaml | 32 - .../cilium-envoy/serviceaccount.yaml | 3 +- .../cilium-envoy/servicemonitor.yaml | 3 +- .../templates/cilium-gateway-api-class.yaml | 3 +- .../templates/cilium-ingress-service.yaml | 9 +- .../templates/cilium-nodeinit/daemonset.yaml | 1 - .../cilium-operator/clusterrole.yaml | 53 +- .../cilium-operator/clusterrolebinding.yaml | 2 +- .../templates/cilium-operator/deployment.yaml | 77 +- .../cilium-operator/poddisruptionbudget.yaml | 2 +- .../cilium-preflight/clusterrole.yaml | 13 +- .../cilium-preflight/clusterrolebinding.yaml | 2 +- .../templates/cilium-preflight/daemonset.yaml | 18 +- .../cilium-preflight/deployment.yaml | 18 +- .../cilium-preflight/poddisruptionbudget.yaml | 2 +- .../clustermesh-apiserver/_helpers.tpl | 4 + .../clustermesh-apiserver/clusterrole.yaml | 6 +- .../clusterrolebinding.yaml | 2 +- .../clustermesh-apiserver/deployment.yaml | 41 +- .../poddisruptionbudget.yaml | 2 +- .../clustermesh-apiserver/service.yaml | 6 + .../tls-certmanager/admin-secret.yaml | 2 - .../tls-certmanager/local-secret.yaml | 18 + .../tls-cronjob/_job-spec.tpl | 88 +- .../tls-cronjob/cronjob.yaml | 2 +- .../tls-cronjob/role.yaml | 1 + .../tls-helm/admin-secret.yaml | 3 +- .../tls-helm/local-secret.yaml | 20 + .../tls-provided/admin-secret.yaml | 2 + .../tls-provided/client-secret.yaml | 2 + .../tls-provided/remote-secret.yaml | 2 + .../tls-provided/server-secret.yaml | 2 + .../users-configmap.yaml | 5 +- .../cilium-etcd-operator-clusterrole.yaml | 79 - ...lium-etcd-operator-clusterrolebinding.yaml | 20 - .../cilium-etcd-operator-deployment.yaml | 128 - .../cilium-etcd-operator-serviceaccount.yaml | 16 - .../etcd-operator-clusterrole.yaml | 60 - .../etcd-operator-clusterrolebinding.yaml | 20 - .../etcd-operator-serviceaccount.yaml | 16 - .../etcd-operator/poddisruptionbudget.yaml | 28 - 
.../templates/hubble-relay/configmap.yaml | 12 +- .../templates/hubble-relay/deployment.yaml | 68 +- .../hubble-relay/poddisruptionbudget.yaml | 2 +- .../templates/hubble-relay/service.yaml | 10 +- .../templates/hubble-ui/clusterrole.yaml | 2 +- .../hubble-ui/clusterrolebinding.yaml | 2 +- .../templates/hubble-ui/deployment.yaml | 1 - .../cilium/templates/hubble-ui/ingress.yaml | 9 +- .../hubble-ui/poddisruptionbudget.yaml | 2 +- .../cilium/templates/hubble/peer-service.yaml | 2 - .../templates/hubble/servicemonitor.yaml | 8 + .../metrics-server-secret.yaml | 32 + .../hubble/tls-cronjob/_job-spec.tpl | 112 +- .../templates/hubble/tls-cronjob/cronjob.yaml | 2 +- .../{clusterrole.yaml => role.yaml} | 7 +- ...usterrolebinding.yaml => rolebinding.yaml} | 7 +- .../templates/hubble/tls-helm/_helpers.tpl | 31 - .../tls-helm/metrics-server-secret.yaml | 22 + .../hubble/tls-helm/relay-client-secret.yaml | 6 +- .../hubble/tls-helm/relay-server-secret.yaml | 6 +- .../hubble/tls-helm/server-secret.yaml | 6 +- .../hubble/tls-helm/ui-client-certs.yaml | 6 +- .../tls-provided/metrics-server-secret.yaml | 16 + .../templates/spire/agent/clusterrole.yaml | 2 +- .../spire/agent/clusterrolebinding.yaml | 2 +- .../templates/spire/server/clusterrole.yaml | 2 +- .../spire/server/clusterrolebinding.yaml | 2 +- .../charts/cilium/templates/validate.yaml | 39 +- .../cilium/charts/cilium/values.schema.json | 5250 +++++++++++++++++ .../cilium/charts/cilium/values.yaml | 1710 +++--- .../cilium/charts/cilium/values.yaml.tmpl | 1634 ++--- 93 files changed, 8297 insertions(+), 2631 deletions(-) create mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/_extensions.tpl delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/service.yaml create mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/local-secret.yaml create mode 100644 
argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/local-secret.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrole.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrolebinding.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-serviceaccount.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrole.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrolebinding.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-serviceaccount.yaml delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml create mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-certmanager/metrics-server-secret.yaml rename argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/{clusterrole.yaml => role.yaml} (80%) rename argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/{clusterrolebinding.yaml => rolebinding.yaml} (79%) delete mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/_helpers.tpl create mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/metrics-server-secret.yaml create mode 100644 argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-provided/metrics-server-secret.yaml create mode 100644 argocd-helm-charts/cilium/charts/cilium/values.schema.json diff --git a/CHANGELOG.md b/CHANGELOG.md index e401755d9..bd9322b92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,3 +8,4 @@ All releases and the changes included in them (pulled 
from git commits added sin - Updated: aws-ebs-csi-driver from version 2.32.0 to 2.33.0 - Updated: aws-efs-csi-driver from version 3.0.6 to 3.0.7 - Updated: cert-manager from version v1.15.1 to v1.15.2 +- Updated: cilium from version 1.15.6 to 1.16.0 diff --git a/argocd-helm-charts/cilium/Chart.lock b/argocd-helm-charts/cilium/Chart.lock index a6bc4e719..aac94879c 100644 --- a/argocd-helm-charts/cilium/Chart.lock +++ b/argocd-helm-charts/cilium/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: cilium repository: https://helm.cilium.io/ - version: 1.15.6 -digest: sha256:82a9838958b113bc216e8c6540661f482c62ef7faccc9b4a8c8f33ad3854600f -generated: "2024-07-09T02:53:10.317421601+05:30" + version: 1.16.0 +digest: sha256:9279043720f34d7ef98a66e77b0b354252cd492a6e687b871bc3e281c11aeb16 +generated: "2024-07-31T20:39:12.179153751+05:30" diff --git a/argocd-helm-charts/cilium/Chart.yaml b/argocd-helm-charts/cilium/Chart.yaml index c392f63fc..35af75f10 100644 --- a/argocd-helm-charts/cilium/Chart.yaml +++ b/argocd-helm-charts/cilium/Chart.yaml @@ -3,5 +3,5 @@ name: cilium version: 1.0.0 dependencies: - name: cilium - version: 1.15.6 + version: 1.16.0 repository: https://helm.cilium.io/ diff --git a/argocd-helm-charts/cilium/charts/cilium/Chart.yaml b/argocd-helm-charts/cilium/charts/cilium/Chart.yaml index 50bd27f4e..007af8628 100644 --- a/argocd-helm-charts/cilium/charts/cilium/Chart.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/Chart.yaml @@ -79,7 +79,7 @@ annotations: Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that can be used for pooled IPAM (i.e. 
the multi-pool IPAM mode).\n" apiVersion: v2 -appVersion: 1.15.6 +appVersion: 1.16.0 description: eBPF-based Networking, Security, and Observability home: https://cilium.io/ icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg @@ -91,8 +91,8 @@ keywords: - Security - Observability - Troubleshooting -kubeVersion: '>= 1.16.0-0' +kubeVersion: '>= 1.21.0-0' name: cilium sources: - https://github.com/cilium/cilium -version: 1.15.6 +version: 1.16.0 diff --git a/argocd-helm-charts/cilium/charts/cilium/README.md b/argocd-helm-charts/cilium/charts/cilium/README.md index 21a78f927..2c293a581 100644 --- a/argocd-helm-charts/cilium/charts/cilium/README.md +++ b/argocd-helm-charts/cilium/charts/cilium/README.md @@ -1,6 +1,6 @@ # cilium -![Version: 1.15.6](https://img.shields.io/badge/Version-1.15.6-informational?style=flat-square) ![AppVersion: 1.15.6](https://img.shields.io/badge/AppVersion-1.15.6-informational?style=flat-square) +![Version: 1.16.0](https://img.shields.io/badge/Version-1.16.0-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square) Cilium is open source software for providing and transparently securing network connectivity and loadbalancing between application workloads such as @@ -18,7 +18,7 @@ efficient and flexible. ## Prerequisites -* Kubernetes: `>= 1.16.0-0` +* Kubernetes: `>= 1.21.0-0` * Helm: `>= 3.0` ## Getting Started @@ -53,7 +53,7 @@ contributors across the globe, there is almost always someone available to help. | Key | Type | Default | Description | |-----|------|---------|-------------| -| MTU | int | `0` | Configure the underlying network MTU to overwrite auto-detected MTU. | +| MTU | int | `0` | Configure the underlying network MTU to overwrite auto-detected MTU. This value doesn't change the host network interface MTU i.e. eth0 or ens0. 
It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, cilium_vxlan and lxc_health interfaces. | | affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-agent. | | agent | bool | `true` | Install the cilium agent resources. | | agentNotReadyTaintKey | string | `"node.cilium.io/agent-not-ready"` | Configure the key of the taint indicating that Cilium is not ready on the node. When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up. | @@ -73,7 +73,7 @@ contributors across the globe, there is almost always someone available to help. | authentication.mutual.spire.enabled | bool | `false` | Enable SPIRE integration (beta) | | authentication.mutual.spire.install.agent.affinity | object | `{}` | SPIRE agent affinity configuration | | authentication.mutual.spire.install.agent.annotations | object | `{}` | SPIRE agent annotations | -| authentication.mutual.spire.install.agent.image | object | `{"digest":"sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-agent","tag":"1.8.5","useDigest":true}` | SPIRE agent image | +| authentication.mutual.spire.install.agent.image | object | `{"digest":"sha256:5106ac601272a88684db14daf7f54b9a45f31f77bb16a906bd5e87756ee7b97c","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-agent","tag":"1.9.6","useDigest":true}` | SPIRE agent image | | authentication.mutual.spire.install.agent.labels | object | `{}` | SPIRE agent labels | | authentication.mutual.spire.install.agent.nodeSelector | object | `{}` | SPIRE agent nodeSelector configuration ref: ref: 
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | authentication.mutual.spire.install.agent.podSecurityContext | object | `{}` | Security context to be added to spire agent pods. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod | @@ -83,7 +83,7 @@ contributors across the globe, there is almost always someone available to help. | authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true | | authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. 
| -| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.36.1","useDigest":true}` | init container image of SPIRE agent and server | +| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:9ae97d36d26566ff84e8893c64a6dc4fe8ca6d1144bf5b87b2b85a32def253c7","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.36.1","useDigest":true}` | init container image of SPIRE agent and server | | authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into | | authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration | | authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations | @@ -93,7 +93,7 @@ contributors across the globe, there is almost always someone available to help. 
| authentication.mutual.spire.install.server.dataStorage.enabled | bool | `true` | Enable SPIRE server data storage | | authentication.mutual.spire.install.server.dataStorage.size | string | `"1Gi"` | Size of the SPIRE server data storage | | authentication.mutual.spire.install.server.dataStorage.storageClass | string | `nil` | StorageClass of the SPIRE server data storage | -| authentication.mutual.spire.install.server.image | object | `{"digest":"sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-server","tag":"1.8.5","useDigest":true}` | SPIRE server image | +| authentication.mutual.spire.install.server.image | object | `{"digest":"sha256:59a0b92b39773515e25e68a46c40d3b931b9c1860bc445a79ceb45a805cab8b4","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-server","tag":"1.9.6","useDigest":true}` | SPIRE server image | | authentication.mutual.spire.install.server.initContainers | list | `[]` | SPIRE server init containers | | authentication.mutual.spire.install.server.labels | object | `{}` | SPIRE server labels | | authentication.mutual.spire.install.server.nodeSelector | object | `{}` | SPIRE server nodeSelector configuration ref: ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | @@ -126,6 +126,13 @@ contributors across the globe, there is almost always someone available to help. | bpf.autoMount.enabled | bool | `true` | Enable automatic mount of BPF filesystem When `autoMount` is enabled, the BPF filesystem is mounted at `bpf.root` path on the underlying host and inside the cilium agent pod. If users disable `autoMount`, it's expected that users have mounted bpffs filesystem at the specified `bpf.root` volume, and then the volume will be mounted inside the cilium agent pod at the same path. 
| | bpf.ctAnyMax | int | `262144` | Configure the maximum number of entries for the non-TCP connection tracking table. | | bpf.ctTcpMax | int | `524288` | Configure the maximum number of entries in the TCP connection tracking table. | +| bpf.datapathMode | string | `veth` | Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) | +| bpf.disableExternalIPMitigation | bool | `false` | Disable ExternalIP mitigation (CVE-2020-8554) | +| bpf.enableTCX | bool | `true` | Attach endpoint programs using tcx instead of legacy tc hooks on supported kernels. | +| bpf.events | object | `{"drop":{"enabled":true},"policyVerdict":{"enabled":true},"trace":{"enabled":true}}` | Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. | +| bpf.events.drop.enabled | bool | `true` | Enable drop events. | +| bpf.events.policyVerdict.enabled | bool | `true` | Enable policy verdict events. | +| bpf.events.trace.enabled | bool | `true` | Enable trace events. | | bpf.hostLegacyRouting | bool | `false` | Configure whether direct routing mode should route traffic via host stack (true) or directly and more efficiently out of BPF (false) if the kernel supports it. The latter has the implication that it will also bypass netfilter in the host namespace. | | bpf.lbExternalClusterIP | bool | `false` | Allow cluster external access to ClusterIP services. | | bpf.lbMapMax | int | `65536` | Configure the maximum number of service entries in the load balancer maps. | @@ -143,7 +150,7 @@ contributors across the globe, there is almost always someone available to help. | bpf.tproxy | bool | `false` | Configure the eBPF-based TPROXY to reduce reliance on iptables rules for implementing Layer 7 policy. | | bpf.vlanBypass | list | `[]` | Configure explicitly allowed VLAN id's for bpf logic bypass. [0] will allow all VLAN id's without any filtering. | | bpfClockProbe | bool | `false` | Enable BPF clock source probing for more efficient tick retrieval. 
| -| certgen | object | `{"affinity":{},"annotations":{"cronJob":{},"job":{}},"extraVolumeMounts":[],"extraVolumes":[],"image":{"digest":"sha256:bbc5e65e9dc65bc6b58967fe536b7f3b54e12332908aeb0a96a36866b4372b4e","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/certgen","tag":"v0.1.12","useDigest":true},"podLabels":{},"tolerations":[],"ttlSecondsAfterFinished":1800}` | Configure certificate generation for Hubble integration. If hubble.tls.auto.method=cronJob, these values are used for the Kubernetes CronJob which will be scheduled regularly to (re)generate any certificates not provided manually. | +| certgen | object | `{"affinity":{},"annotations":{"cronJob":{},"job":{}},"extraVolumeMounts":[],"extraVolumes":[],"image":{"digest":"sha256:169d93fd8f2f9009db3b9d5ccd37c2b753d0989e1e7cd8fe79f9160c459eef4f","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/certgen","tag":"v0.2.0","useDigest":true},"podLabels":{},"tolerations":[],"ttlSecondsAfterFinished":1800}` | Configure certificate generation for Hubble integration. If hubble.tls.auto.method=cronJob, these values are used for the Kubernetes CronJob which will be scheduled regularly to (re)generate any certificates not provided manually. | | certgen.affinity | object | `{}` | Affinity for certgen | | certgen.annotations | object | `{"cronJob":{},"job":{}}` | Annotations to be added to the hubble-certgen initial Job and CronJob | | certgen.extraVolumeMounts | list | `[]` | Additional certgen volumeMounts. | @@ -155,28 +162,34 @@ contributors across the globe, there is almost always someone available to help. | cgroup.autoMount.enabled | bool | `true` | Enable auto mount of cgroup2 filesystem. When `autoMount` is enabled, cgroup2 filesystem is mounted at `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod. 
If users disable `autoMount`, it's expected that users have mounted cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the volume will be mounted inside the cilium agent pod at the same path. | | cgroup.autoMount.resources | object | `{}` | Init Container Cgroup Automount resource limits & requests | | cgroup.hostRoot | string | `"/run/cilium/cgroupv2"` | Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) | +| ciliumEndpointSlice.enabled | bool | `false` | Enable Cilium EndpointSlice feature. | +| ciliumEndpointSlice.rateLimits | list | `[{"burst":20,"limit":10,"nodes":0},{"burst":15,"limit":7,"nodes":100},{"burst":10,"limit":5,"nodes":500}]` | List of rate limit options to be used for the CiliumEndpointSlice controller. Each object in the list must have the following fields: nodes: Count of nodes at which to apply the rate limit. limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50. burst: The burst request rate in requests per second. The maximum burst that can be configured is 100. | | cleanBpfState | bool | `false` | Clean all eBPF datapath state from the initContainer of the cilium-agent DaemonSet. WARNING: Use with care! | | cleanState | bool | `false` | Clean all local Cilium state from the initContainer of the cilium-agent DaemonSet. Implies cleanBpfState: true. WARNING: Use with care! | | cluster.id | int | `0` | Unique ID of the cluster. Must be unique across all connected clusters and in the range of 1 to 255. Only required for Cluster Mesh, may be 0 if Cluster Mesh is not used. | -| cluster.name | string | `"default"` | Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. | +| cluster.name | string | `"default"` | Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. 
It must respect the following constraints: * It must contain at most 32 characters; * It must begin and end with a lower case alphanumeric character; * It may contain lower case alphanumeric characters and dashes between. The "default" name cannot be used if the Cluster ID is different from 0. | | clustermesh.annotations | object | `{}` | Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config) | -| clustermesh.apiserver.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"clustermesh-apiserver"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for clustermesh.apiserver | +| clustermesh.apiserver.affinity | object | `{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchLabels":{"k8s-app":"clustermesh-apiserver"}},"topologyKey":"kubernetes.io/hostname"},"weight":100}]}}` | Affinity for clustermesh.apiserver | | clustermesh.apiserver.etcd.init.extraArgs | list | `[]` | Additional arguments to `clustermesh-apiserver etcdinit`. | | clustermesh.apiserver.etcd.init.extraEnv | list | `[]` | Additional environment variables to `clustermesh-apiserver etcdinit`. 
| | clustermesh.apiserver.etcd.init.resources | object | `{}` | Specifies the resources for etcd init container in the apiserver | | clustermesh.apiserver.etcd.lifecycle | object | `{}` | lifecycle setting for the etcd container | | clustermesh.apiserver.etcd.resources | object | `{}` | Specifies the resources for etcd container in the apiserver | -| clustermesh.apiserver.etcd.securityContext | object | `{}` | Security context to be added to clustermesh-apiserver etcd containers | +| clustermesh.apiserver.etcd.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}` | Security context to be added to clustermesh-apiserver etcd containers | +| clustermesh.apiserver.etcd.storageMedium | string | `"Disk"` | Specifies whether etcd data is stored in a temporary volume backed by the node's default medium, such as disk, SSD or network storage (Disk), or RAM (Memory). The Memory option enables improved etcd read and write performance at the cost of additional memory usage, which counts against the memory limits of the container. | | clustermesh.apiserver.extraArgs | list | `[]` | Additional clustermesh-apiserver arguments. | | clustermesh.apiserver.extraEnv | list | `[]` | Additional clustermesh-apiserver environment variables. | | clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. | | clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. | -| clustermesh.apiserver.image | object | `{"digest":"sha256:6365c2fe8a038fc7adcdeb7ffb8d7a8a2cd3ee524687f35fff9df76fafeeb029","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.15.6","useDigest":true}` | Clustermesh API server image. | -| clustermesh.apiserver.kvstoremesh.enabled | bool | `false` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. 
| +| clustermesh.apiserver.healthPort | int | `9880` | TCP port for the clustermesh-apiserver health API. | +| clustermesh.apiserver.image | object | `{"digest":"sha256:a1597b7de97cfa03f1330e6b784df1721eb69494cd9efb0b3a6930680dfe7a8e","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.16.0","useDigest":true}` | Clustermesh API server image. | +| clustermesh.apiserver.kvstoremesh.enabled | bool | `true` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. | | clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. | | clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. | | clustermesh.apiserver.kvstoremesh.extraVolumeMounts | list | `[]` | Additional KVStoreMesh volumeMounts. | +| clustermesh.apiserver.kvstoremesh.healthPort | int | `9881` | TCP port for the KVStoreMesh health API. | | clustermesh.apiserver.kvstoremesh.lifecycle | object | `{}` | lifecycle setting for the KVStoreMesh container | +| clustermesh.apiserver.kvstoremesh.readinessProbe | object | `{}` | Configuration for the KVStoreMesh readiness probe. | | clustermesh.apiserver.kvstoremesh.resources | object | `{}` | Resource requests and limits for the KVStoreMesh container | | clustermesh.apiserver.kvstoremesh.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}` | KVStoreMesh Security context | | clustermesh.apiserver.lifecycle | object | `{}` | lifecycle setting for the apiserver container | @@ -205,14 +218,18 @@ contributors across the globe, there is almost always someone available to help. 
| clustermesh.apiserver.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | | clustermesh.apiserver.podDisruptionBudget.minAvailable | string | `nil` | Minimum number/percentage of pods that should remain scheduled. When it's set, maxUnavailable must be disabled by `maxUnavailable: null` | | clustermesh.apiserver.podLabels | object | `{}` | Labels to be added to clustermesh-apiserver pods | -| clustermesh.apiserver.podSecurityContext | object | `{}` | Security context to be added to clustermesh-apiserver pods | +| clustermesh.apiserver.podSecurityContext | object | `{"fsGroup":65532,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | Security context to be added to clustermesh-apiserver pods | | clustermesh.apiserver.priorityClassName | string | `""` | The priority class to use for clustermesh-apiserver | +| clustermesh.apiserver.readinessProbe | object | `{}` | Configuration for the clustermesh-apiserver readiness probe. | | clustermesh.apiserver.replicas | int | `1` | Number of replicas run for the clustermesh-apiserver deployment. | | clustermesh.apiserver.resources | object | `{}` | Resource requests and limits for the clustermesh-apiserver | -| clustermesh.apiserver.securityContext | object | `{}` | Security context to be added to clustermesh-apiserver containers | -| clustermesh.apiserver.service.annotations | object | `{}` | Annotations for the clustermesh-apiserver For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 | -| clustermesh.apiserver.service.externalTrafficPolicy | string | `nil` | The externalTrafficPolicy of service used for apiserver access. | -| clustermesh.apiserver.service.internalTrafficPolicy | string | `nil` | The internalTrafficPolicy of service used for apiserver access. 
| +| clustermesh.apiserver.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}` | Security context to be added to clustermesh-apiserver containers | +| clustermesh.apiserver.service.annotations | object | `{}` | Annotations for the clustermesh-apiserver For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: "true" | +| clustermesh.apiserver.service.enableSessionAffinity | string | `"HAOnly"` | Defines when to enable session affinity. Each replica in a clustermesh-apiserver deployment runs its own discrete etcd cluster. Remote clients connect to one of the replicas through a shared Kubernetes Service. A client reconnecting to a different backend will require a full resync to ensure data integrity. Session affinity can reduce the likelihood of this happening, but may not be supported by all cloud providers. Possible values: - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. - "Always" Always enable session affinity. - "Never" Never enable session affinity. Useful in environments where session affinity is not supported, but may lead to slightly degraded performance due to more frequent reconnections. | +| clustermesh.apiserver.service.externalTrafficPolicy | string | `"Cluster"` | The externalTrafficPolicy of service used for apiserver access. | +| clustermesh.apiserver.service.internalTrafficPolicy | string | `"Cluster"` | The internalTrafficPolicy of service used for apiserver access. | +| clustermesh.apiserver.service.loadBalancerClass | string | `nil` | Configure a loadBalancerClass. Allows to configure the loadBalancerClass on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer (requires Kubernetes 1.24+). | +| clustermesh.apiserver.service.loadBalancerIP | string | `nil` | Configure a specific loadBalancerIP. 
Allows to configure a specific loadBalancerIP on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. | | clustermesh.apiserver.service.nodePort | int | `32379` | Optional port to use as the node port for apiserver access. WARNING: make sure to configure a different NodePort in each cluster if kube-proxy replacement is enabled, as Cilium is currently affected by a known bug (#24692) when NodePorts are handled by the KPR implementation. If a service with the same NodePort exists both in the local and the remote cluster, all traffic originating from inside the cluster and targeting the corresponding NodePort will be redirected to a local backend, regardless of whether the destination node belongs to the local or the remote cluster. | | clustermesh.apiserver.service.type | string | `"NodePort"` | The type of service used for apiserver access. | | clustermesh.apiserver.terminationGracePeriodSeconds | int | `30` | terminationGracePeriodSeconds for the clustermesh-apiserver deployment | @@ -223,17 +240,20 @@ contributors across the globe, there is almost always someone available to help. | clustermesh.apiserver.tls.auto.certValidityDuration | int | `1095` | Generated certificates validity duration in days. | | clustermesh.apiserver.tls.auto.enabled | bool | `true` | When set to true, automatically generate a CA and certificates to enable mTLS between clustermesh-apiserver and external workload instances. If set to false, the certs to be provided by setting appropriate values below. | | clustermesh.apiserver.tls.client | object | `{"cert":"","key":""}` | base64 encoded PEM values for the clustermesh-apiserver client certificate and private key. Used if 'auto' is not enabled. | +| clustermesh.apiserver.tls.enableSecrets | bool | `true` | Allow users to provide their own certificates Users may need to provide their certificates using a mechanism that requires they provide their own secrets. 
This setting does not apply to any of the auto-generated mechanisms below, it only restricts the creation of secrets via the `tls-provided` templates. | | clustermesh.apiserver.tls.remote | object | `{"cert":"","key":""}` | base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key. Used if 'auto' is not enabled. | | clustermesh.apiserver.tls.server | object | `{"cert":"","extraDnsNames":[],"extraIpAddresses":[],"key":""}` | base64 encoded PEM values for the clustermesh-apiserver server certificate and private key. Used if 'auto' is not enabled. | | clustermesh.apiserver.tls.server.extraDnsNames | list | `[]` | Extra DNS names added to certificate when it's auto generated | | clustermesh.apiserver.tls.server.extraIpAddresses | list | `[]` | Extra IP addresses added to certificate when it's auto generated | | clustermesh.apiserver.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | clustermesh.apiserver.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for clustermesh-apiserver | -| clustermesh.apiserver.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | clustermesh-apiserver update strategy | +| clustermesh.apiserver.updateStrategy | object | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0},"type":"RollingUpdate"}` | clustermesh-apiserver update strategy | | clustermesh.config | object | `{"clusters":[],"domain":"mesh.cilium.io","enabled":false}` | Clustermesh explicit configuration. | | clustermesh.config.clusters | list | `[]` | List of clusters to be peered in the mesh. | | clustermesh.config.domain | string | `"mesh.cilium.io"` | Default dns domain for the Clustermesh API servers This is used in the case cluster addresses are not provided and IPs are used. 
| | clustermesh.config.enabled | bool | `false` | Enable the Clustermesh explicit configuration. | +| clustermesh.enableEndpointSliceSynchronization | bool | `false` | Enable the synchronization of Kubernetes EndpointSlices corresponding to the remote endpoints of appropriately-annotated global services through ClusterMesh | +| clustermesh.enableMCSAPISupport | bool | `false` | Enable Multi-Cluster Services API support | | clustermesh.maxConnectedClusters | int | `255` | The maximum number of clusters to support in a ClusterMesh. This value cannot be changed on running clusters, and all clusters in a ClusterMesh must be configured with the same value. Values > 255 will decrease the maximum allocatable cluster-local identities. Supported values are 255 and 511. | | clustermesh.useAPIServer | bool | `false` | Deploy clustermesh-apiserver for clustermesh | | cni.binPath | string | `"/opt/cni/bin"` | Configure the path to the CNI binary directory on the host. | @@ -243,6 +263,7 @@ contributors across the globe, there is almost always someone available to help. | cni.confPath | string | `"/etc/cni/net.d"` | Configure the path to the CNI configuration directory on the host. | | cni.configMapKey | string | `"cni-config"` | Configure the key in the CNI ConfigMap to read the contents of the CNI configuration from. | | cni.customConf | bool | `false` | Skip writing of the CNI configuration. This can be used if writing of the CNI configuration is performed by external automation. | +| cni.enableRouteMTUForCNIChaining | bool | `false` | Enable route MTU for pod netns when CNI chaining is used | | cni.exclusive | bool | `true` | Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime. 
| | cni.hostConfDirMountPath | string | `"/host/etc/cni/net.d"` | Configure the path to where the CNI configuration directory is mounted inside the agent pod. | | cni.install | bool | `true` | Install the CNI configuration and binary files into the filesystem. | @@ -251,8 +272,6 @@ contributors across the globe, there is almost always someone available to help. | cni.uninstall | bool | `false` | Remove the CNI configuration and binary files on agent shutdown. Enable this if you're removing Cilium from the cluster. Disable this to prevent the CNI configuration file from being removed during agent upgrade, which can cause nodes to go unmanageable. | | conntrackGCInterval | string | `"0s"` | Configure how frequently garbage collection should occur for the datapath connection tracking table. | | conntrackGCMaxInterval | string | `""` | Configure the maximum frequency for the garbage collection of the connection tracking table. Only affects the automatic computation for the frequency and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently clean up unused identities created from ToFQDN policies. | -| containerRuntime | object | `{"integration":"none"}` | Configure container runtime specific integration. Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15. | -| containerRuntime.integration | string | `"none"` | Enables specific integrations for container runtimes. Supported values: - crio - none | | crdWaitTimeout | string | `"5m"` | Configure timeout in which Cilium will exit if CRDs are not available | | customCalls | object | `{"enabled":false}` | Tail call hooks for custom eBPF programs. | | customCalls.enabled | bool | `false` | Enable tail call hooks for custom eBPF programs. | @@ -263,6 +282,7 @@ contributors across the globe, there is almost always someone available to help. 
| dashboards | object | `{"annotations":{},"enabled":false,"label":"grafana_dashboard","labelValue":"1","namespace":null}` | Grafana dashboards for cilium-agent grafana can import dashboards based on the label and value ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards | | debug.enabled | bool | `false` | Enable debug logging | | debug.verbose | string | `nil` | Configure verbosity levels for debug logging This option is used to enable debug messages for operations related to such sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is for enabling debug messages emitted per request, message and connection. Multiple values can be set via a space-separated string (e.g. "datapath envoy"). Applicable values: - flow - kvstore - envoy - datapath - policy | +| directRoutingSkipUnreachable | bool | `false` | Enable skipping of PodCIDR routes between worker nodes if the worker nodes are in a different L2 network segment. | | disableEndpointCRD | bool | `false` | Disable the usage of CiliumEndpoint CRD. | | dnsPolicy | string | `""` | DNS policy for Cilium agent pods. Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | | dnsProxy.dnsRejectResponseCode | string | `"refused"` | DNS response code for rejecting DNS requests, available options are '[nameError refused]'. | @@ -274,10 +294,10 @@ contributors across the globe, there is almost always someone available to help. | dnsProxy.preCache | string | `""` | DNS cache data at this path is preloaded on agent startup. | | dnsProxy.proxyPort | int | `0` | Global port on which the in-agent DNS proxy should listen. Default 0 is a OS-assigned port. | | dnsProxy.proxyResponseMaxDelay | string | `"100ms"` | The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information. 
| +| dnsProxy.socketLingerTimeout | int | `10` | Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. | | egressGateway.enabled | bool | `false` | Enables egress gateway to redirect and SNAT the traffic that leaves the cluster. | -| egressGateway.installRoutes | bool | `false` | Deprecated without a replacement necessary. | | egressGateway.reconciliationTriggerInterval | string | `"1s"` | Time between triggers of egress gateway state reconciliations | -| enableCiliumEndpointSlice | bool | `false` | Enable CiliumEndpointSlice feature. | +| enableCiliumEndpointSlice | bool | `false` | Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead). | | enableCriticalPriorityClass | bool | `true` | Explicitly enable or disable priority class. .Capabilities.KubeVersion is unsettable in `helm template` calls, it depends on k8s libraries version that Helm was compiled against. This option allows to explicitly disable setting the priority class, which is useful for rendering charts for gke clusters in advance. | | enableIPv4BIGTCP | bool | `false` | Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods | | enableIPv4Masquerade | bool | `true` | Enables masquerading of IPv4 traffic leaving the node from endpoints. | @@ -285,30 +305,26 @@ contributors across the globe, there is almost always someone available to help. | enableIPv6Masquerade | bool | `true` | Enables masquerading of IPv6 traffic leaving the node from endpoints. | | enableK8sTerminatingEndpoint | bool | `true` | Configure whether to enable auto detect of terminating state for endpoints in order to support graceful termination. 
| | enableMasqueradeRouteSource | bool | `false` | Enables masquerading to the source of the route for traffic leaving the node from endpoints. | -| enableRuntimeDeviceDetection | bool | `false` | Enables experimental support for the detection of new and removed datapath devices. When devices change the eBPF datapath is reloaded and services updated. If "devices" is set then only those devices, or devices matching a wildcard will be considered. | +| enableRuntimeDeviceDetection | bool | `true` | Enables experimental support for the detection of new and removed datapath devices. When devices change the eBPF datapath is reloaded and services updated. If "devices" is set then only those devices, or devices matching a wildcard will be considered. This option has been deprecated and is a no-op. | | enableXTSocketFallback | bool | `true` | Enables the fallback compatibility solution for when the xt_socket kernel module is missing and it is needed for the datapath L7 redirection to work properly. See documentation for details on when this can be disabled: https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. | | encryption.enabled | bool | `false` | Enable transparent network encryption. | -| encryption.interface | string | `""` | Deprecated in favor of encryption.ipsec.interface. To be removed in 1.15. The interface to use for encrypted traffic. This option is only effective when encryption.type is set to ipsec. | +| encryption.ipsec.encryptedOverlay | bool | `false` | Enable IPsec encrypted overlay | | encryption.ipsec.interface | string | `""` | The interface to use for encrypted traffic. | -| encryption.ipsec.keyFile | string | `""` | Name of the key file inside the Kubernetes secret configured via secretName. | +| encryption.ipsec.keyFile | string | `"keys"` | Name of the key file inside the Kubernetes secret configured via secretName. | | encryption.ipsec.keyRotationDuration | string | `"5m"` | Maximum duration of the IPsec key rotation. 
The previous key will be removed after that delay. | | encryption.ipsec.keyWatcher | bool | `true` | Enable the key watcher. If disabled, a restart of the agent will be necessary on key rotations. | -| encryption.ipsec.mountPath | string | `""` | Path to mount the secret inside the Cilium pod. | -| encryption.ipsec.secretName | string | `""` | Name of the Kubernetes secret containing the encryption keys. | -| encryption.keyFile | string | `"keys"` | Deprecated in favor of encryption.ipsec.keyFile. To be removed in 1.15. Name of the key file inside the Kubernetes secret configured via secretName. This option is only effective when encryption.type is set to ipsec. | -| encryption.mountPath | string | `"/etc/ipsec"` | Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15. Path to mount the secret inside the Cilium pod. This option is only effective when encryption.type is set to ipsec. | +| encryption.ipsec.mountPath | string | `"/etc/ipsec"` | Path to mount the secret inside the Cilium pod. | +| encryption.ipsec.secretName | string | `"cilium-ipsec-keys"` | Name of the Kubernetes secret containing the encryption keys. | | encryption.nodeEncryption | bool | `false` | Enable encryption for pure node to node traffic. This option is only effective when encryption.type is set to "wireguard". | -| encryption.secretName | string | `"cilium-ipsec-keys"` | Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15. Name of the Kubernetes secret containing the encryption keys. This option is only effective when encryption.type is set to ipsec. | | encryption.strictMode | object | `{"allowRemoteNodeIdentities":false,"cidr":"","enabled":false}` | Configure the WireGuard Pod2Pod strict mode. | | encryption.strictMode.allowRemoteNodeIdentities | bool | `false` | Allow dynamic lookup of remote node identities. This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. 
| | encryption.strictMode.cidr | string | `""` | CIDR for the WireGuard Pod2Pod strict mode. | | encryption.strictMode.enabled | bool | `false` | Enable WireGuard Pod2Pod strict mode. | | encryption.type | string | `"ipsec"` | Encryption method. Can be either ipsec or wireguard. | -| encryption.wireguard.persistentKeepalive | string | `"0s"` | Controls Wireguard PersistentKeepalive option. Set 0s to disable. | -| encryption.wireguard.userspaceFallback | bool | `false` | Enables the fallback to the user-space implementation. | +| encryption.wireguard.persistentKeepalive | string | `"0s"` | Controls WireGuard PersistentKeepalive option. Set 0s to disable. | +| encryption.wireguard.userspaceFallback | bool | `false` | Enables the fallback to the user-space implementation (deprecated). | | endpointHealthChecking.enabled | bool | `true` | Enable connectivity health checking between virtual endpoints. | | endpointRoutes.enabled | bool | `false` | Enable use of per endpoint routes instead of routing via the cilium_host interface. | -| endpointStatus | object | `{"enabled":false,"status":""}` | Enable endpoint status. Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space. | | eni.awsEnablePrefixDelegation | bool | `false` | Enable ENI prefix delegation | | eni.awsReleaseExcessIPs | bool | `false` | Release IPs not used from the ENI | | eni.ec2APIEndpoint | string | `""` | EC2 API endpoint to use | @@ -323,9 +339,12 @@ contributors across the globe, there is almost always someone available to help. 
| eni.updateEC2AdapterLimitViaAPI | bool | `true` | Update ENI Adapter limits from the EC2 API | | envoy.affinity | object | `{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"cilium.io/no-schedule","operator":"NotIn","values":["true"]}]}]}},"podAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium"}},"topologyKey":"kubernetes.io/hostname"}]},"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium-envoy"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-envoy. | | envoy.annotations | object | `{}` | Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) | +| envoy.baseID | int | `0` | Set Envoy '--base-id' to use when allocating shared memory regions. Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0' | | envoy.connectTimeoutSeconds | int | `2` | Time in seconds after which a TCP connection attempt times out | +| envoy.debug.admin.enabled | bool | `false` | Enable admin interface for cilium-envoy. This is useful for debugging and should not be enabled in production. | +| envoy.debug.admin.port | int | `9901` | Port number (bound to loopback interface). kubectl port-forward can be used to access the admin interface. | | envoy.dnsPolicy | string | `nil` | DNS policy for Cilium envoy pods. Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | -| envoy.enabled | bool | `false` | Enable Envoy Proxy in standalone DaemonSet. | +| envoy.enabled | string | `true` for new installation | Enable Envoy Proxy in standalone DaemonSet. This field is enabled by default for new installation. | | envoy.extraArgs | list | `[]` | Additional envoy container arguments. 
| | envoy.extraContainers | list | `[]` | Additional containers added to the cilium Envoy DaemonSet. | | envoy.extraEnv | list | `[]` | Additional envoy container environment variables. | @@ -334,7 +353,7 @@ contributors across the globe, there is almost always someone available to help. | envoy.extraVolumes | list | `[]` | Additional envoy volumes. | | envoy.healthPort | int | `9878` | TCP port for the health API. | | envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s | -| envoy.image | object | `{"digest":"sha256:b528b291561e459024f66414ac3325b88cdd8f9f4854828a155a11e5b10b78a3","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.28.4-b35188ffa1bbe54d1720d2e392779f7a48e58f6b","useDigest":true}` | Envoy container image. | +| envoy.image | object | `{"digest":"sha256:bd5ff8c66716080028f414ec1cb4f7dc66f40d2fb5a009fff187f4a9b90b566b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.29.7-39a2a56bbd5b3a591f69dbca51d3e30ef97e0e51","useDigest":true}` | Envoy container image. | | envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe | | envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe | | envoy.log.format | string | `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` | The format string to use for laying out the log message metadata of Envoy. | @@ -360,7 +379,8 @@ contributors across the globe, there is almost always someone available to help. 
| envoy.readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe | | envoy.resources | object | `{}` | Envoy resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | envoy.rollOutPods | bool | `false` | Roll out cilium envoy pods automatically when configmap is updated. | -| envoy.securityContext.capabilities.envoy | list | `["NET_ADMIN","SYS_ADMIN"]` | Capabilities for the `cilium-envoy` container | +| envoy.securityContext.capabilities.envoy | list | `["NET_ADMIN","SYS_ADMIN"]` | Capabilities for the `cilium-envoy` container. Even though granted to the container, the cilium-envoy-starter wrapper drops all capabilities after forking the actual Envoy process. `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by setting `envoy.securityContext.capabilities.keepCapNetBindService=true` (in addition to granting the capability to the container). Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. | +| envoy.securityContext.capabilities.keepCapNetBindService | bool | `false` | Keep capability `NET_BIND_SERVICE` for Envoy process. | | envoy.securityContext.privileged | bool | `false` | Run the pod with elevated privileges | | envoy.securityContext.seLinuxOptions | object | `{"level":"s0","type":"spc_t"}` | SELinux options for the `cilium-envoy` container | | envoy.startupProbe.failureThreshold | int | `105` | failure threshold of startup probe. 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) | @@ -371,32 +391,13 @@ contributors across the globe, there is almost always someone available to help. | envoy.xffNumTrustedHopsL7PolicyEgress | int | `0` | Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. 
| | envoy.xffNumTrustedHopsL7PolicyIngress | int | `0` | Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. | | envoyConfig.enabled | bool | `false` | Enable CiliumEnvoyConfig CRD CiliumEnvoyConfig CRD can also be implicitly enabled by other options. | +| envoyConfig.retryInterval | string | `"15s"` | Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated. | | envoyConfig.secretsNamespace | object | `{"create":true,"name":"cilium-secrets"}` | SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. | | envoyConfig.secretsNamespace.create | bool | `true` | Create secrets namespace for CiliumEnvoyConfig CRDs. | | envoyConfig.secretsNamespace.name | string | `"cilium-secrets"` | The name of the secret namespace to which Cilium agents are given read access. | -| etcd.annotations | object | `{}` | Annotations to be added to all top-level etcd-operator objects (resources under templates/etcd-operator) | -| etcd.clusterDomain | string | `"cluster.local"` | Cluster domain for cilium-etcd-operator. | | etcd.enabled | bool | `false` | Enable etcd mode for the agent. | -| etcd.endpoints | list | `["https://CHANGE-ME:2379"]` | List of etcd endpoints (not needed when using managed=true). | -| etcd.extraArgs | list | `[]` | Additional cilium-etcd-operator container arguments. | -| etcd.extraVolumeMounts | list | `[]` | Additional cilium-etcd-operator volumeMounts. | -| etcd.extraVolumes | list | `[]` | Additional cilium-etcd-operator volumes. | -| etcd.image | object | `{"digest":"sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-etcd-operator","tag":"v2.0.7","useDigest":true}` | cilium-etcd-operator image. 
| -| etcd.k8sService | bool | `false` | If etcd is behind a k8s service set this option to true so that Cilium does the service translation automatically without requiring a DNS to be running. | -| etcd.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-etcd-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | -| etcd.podAnnotations | object | `{}` | Annotations to be added to cilium-etcd-operator pods | -| etcd.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | -| etcd.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | -| etcd.podDisruptionBudget.minAvailable | string | `nil` | Minimum number/percentage of pods that should remain scheduled. When it's set, maxUnavailable must be disabled by `maxUnavailable: null` | -| etcd.podLabels | object | `{}` | Labels to be added to cilium-etcd-operator pods | -| etcd.podSecurityContext | object | `{}` | Security context to be added to cilium-etcd-operator pods | -| etcd.priorityClassName | string | `""` | The priority class to use for cilium-etcd-operator | -| etcd.resources | object | `{}` | cilium-etcd-operator resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| etcd.securityContext | object | `{}` | Security context to be added to cilium-etcd-operator pods | -| etcd.ssl | bool | `false` | Enable use of TLS/SSL for connectivity to etcd. 
(auto-enabled if managed=true) | -| etcd.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for cilium-etcd-operator scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | -| etcd.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for cilium-etcd-operator | -| etcd.updateStrategy | object | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":1},"type":"RollingUpdate"}` | cilium-etcd-operator update strategy | +| etcd.endpoints | list | `["https://CHANGE-ME:2379"]` | List of etcd endpoints | +| etcd.ssl | bool | `false` | Enable use of TLS/SSL for connectivity to etcd. | | externalIPs.enabled | bool | `false` | Enable ExternalIPs service support. | | externalWorkloads | object | `{"enabled":false}` | Configure external workloads support | | externalWorkloads.enabled | bool | `false` | Enable support for external workloads, such as VMs (false by default). | @@ -405,13 +406,23 @@ contributors across the globe, there is almost always someone available to help. | extraContainers | list | `[]` | Additional containers added to the cilium DaemonSet. | | extraEnv | list | `[]` | Additional agent container environment variables. | | extraHostPathMounts | list | `[]` | Additional agent hostPath mounts. | +| extraInitContainers | list | `[]` | Additional initContainers added to the cilium Daemonset. | | extraVolumeMounts | list | `[]` | Additional agent volumeMounts. | | extraVolumes | list | `[]` | Additional agent volumes. | +| forceDeviceDetection | bool | `false` | Forces the auto-detection of devices, even if specific devices are explicitly listed | +| gatewayAPI.enableAlpn | bool | `false` | Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. 
| +| gatewayAPI.enableAppProtocol | bool | `false` | Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. | +| gatewayAPI.enableProxyProtocol | bool | `false` | Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. | | gatewayAPI.enabled | bool | `false` | Enable support for Gateway API in cilium This will automatically set enable-envoy-config as well. | +| gatewayAPI.externalTrafficPolicy | string | `"Cluster"` | Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". Note that this value will be ignored when `hostNetwork.enabled == true`. ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy | +| gatewayAPI.gatewayClass.create | string | `"auto"` | Enable creation of GatewayClass resource The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. | +| gatewayAPI.hostNetwork.enabled | bool | `false` | Configure whether the Envoy listeners should be exposed on the host network. | +| gatewayAPI.hostNetwork.nodes.matchLabels | object | `{}` | Specify the labels of the nodes where the Ingress listeners should be exposed matchLabels: kubernetes.io/os: linux kubernetes.io/hostname: kind-worker | | gatewayAPI.secretsNamespace | object | `{"create":true,"name":"cilium-secrets","sync":true}` | SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. | | gatewayAPI.secretsNamespace.create | bool | `true` | Create secrets namespace for Gateway API. | | gatewayAPI.secretsNamespace.name | string | `"cilium-secrets"` | Name of Gateway API secret namespace. 
| | gatewayAPI.secretsNamespace.sync | bool | `true` | Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. If disabled, TLS secrets must be maintained externally. | +| gatewayAPI.xffNumTrustedHops | int | `0` | The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. | | gke.enabled | bool | `false` | Enable Google Kubernetes Engine integration | | healthChecking | bool | `true` | Enable connectivity health checking. | | healthPort | int | `9879` | TCP port for the agent health API. This is not the port for cilium-health. | @@ -421,6 +432,9 @@ contributors across the globe, there is almost always someone available to help. | hostFirewall.enabled | bool | `false` | Enables the enforcement of host policies in the eBPF datapath. | | hostPort.enabled | bool | `false` | Enable hostPort service support. | | hubble.annotations | object | `{}` | Annotations to be added to all top-level hubble objects (resources under templates/hubble) | +| hubble.dropEventEmitter | object | `{"enabled":false,"interval":"2m","reasons":["auth_required","policy_denied"]}` | Emit v1.Events related to pods on detection of packet drops. This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. | +| hubble.dropEventEmitter.interval | string | `"2m"` | - Minimum time between emitting same events. | +| hubble.dropEventEmitter.reasons | list | `["auth_required","policy_denied"]` | - Drop reasons to emit events for. ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason | | hubble.enabled | bool | `true` | Enable Hubble (true by default). 
| | hubble.export | object | `{"dynamic":{"config":{"configMapName":"cilium-flowlog-config","content":[{"excludeFilters":[],"fieldMask":[],"filePath":"/var/run/cilium/hubble/events.log","includeFilters":[],"name":"all"}],"createConfigMap":true},"enabled":false},"fileMaxBackups":5,"fileMaxSizeMb":10,"static":{"allowList":[],"denyList":[],"enabled":false,"fieldMask":[],"filePath":"/var/run/cilium/hubble/events.log"}}` | Hubble flows export. | | hubble.export.dynamic | object | `{"config":{"configMapName":"cilium-flowlog-config","content":[{"excludeFilters":[],"fieldMask":[],"filePath":"/var/run/cilium/hubble/events.log","includeFilters":[],"name":"all"}],"createConfigMap":true},"enabled":false}` | - Dynamic exporters configuration. Dynamic exporters may be reconfigured without a need of agent restarts. | @@ -431,7 +445,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.export.fileMaxSizeMb | int | `10` | - Defines max file size of output file before it gets rotated. | | hubble.export.static | object | `{"allowList":[],"denyList":[],"enabled":false,"fieldMask":[],"filePath":"/var/run/cilium/hubble/events.log"}` | - Static exporter configuration. Static exporter is bound to agent lifecycle. | | hubble.listenAddress | string | `":4244"` | An additional address for Hubble to listen to. Set this field ":4244" if you are enabling Hubble Relay, as it assumes that Hubble is listening on port 4244. | -| hubble.metrics | object | `{"dashboards":{"annotations":{},"enabled":false,"label":"grafana_dashboard","labelValue":"1","namespace":null},"enableOpenMetrics":false,"enabled":null,"port":9965,"serviceAnnotations":{},"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","jobLabel":"","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Hubble metrics configuration. 
See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics for more comprehensive documentation about Hubble metrics. | +| hubble.metrics | object | `{"dashboards":{"annotations":{},"enabled":false,"label":"grafana_dashboard","labelValue":"1","namespace":null},"enableOpenMetrics":false,"enabled":null,"port":9965,"serviceAnnotations":{},"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","jobLabel":"","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}],"tlsConfig":{}},"tls":{"enabled":false,"server":{"cert":"","extraDnsNames":[],"extraIpAddresses":[],"key":"","mtls":{"enabled":false,"key":"ca.crt","name":null,"useSecret":false}}}}` | Hubble metrics configuration. See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics for more comprehensive documentation about Hubble metrics. | | hubble.metrics.dashboards | object | `{"annotations":{},"enabled":false,"label":"grafana_dashboard","labelValue":"1","namespace":null}` | Grafana dashboards for hubble grafana can import dashboards based on the label and value ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards | | hubble.metrics.enableOpenMetrics | bool | `false` | Enables exporting hubble metrics in OpenMetrics format. | | hubble.metrics.enabled | string | `nil` | Configures the list of metrics to collect. If empty or null, metrics are disabled. Example: enabled: - dns:query;ignoreAAAA - drop - tcp - flow - icmp - http You can specify the list of metrics from the helm CLI: --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}" | @@ -444,6 +458,13 @@ contributors across the globe, there is almost always someone available to help. 
| hubble.metrics.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor hubble | | hubble.metrics.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor hubble | | hubble.metrics.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor hubble | +| hubble.metrics.tls.server.cert | string | `""` | base64 encoded PEM values for the Hubble metrics server certificate. | +| hubble.metrics.tls.server.extraDnsNames | list | `[]` | Extra DNS names added to certificate when it's auto generated | +| hubble.metrics.tls.server.extraIpAddresses | list | `[]` | Extra IP addresses added to certificate when it's auto generated | +| hubble.metrics.tls.server.key | string | `""` | base64 encoded PEM values for the Hubble metrics server key. | +| hubble.metrics.tls.server.mtls | object | `{"enabled":false,"key":"ca.crt","name":null,"useSecret":false}` | Configure mTLS for the Hubble metrics server. | +| hubble.metrics.tls.server.mtls.key | string | `"ca.crt"` | Entry of the ConfigMap containing the CA. | +| hubble.metrics.tls.server.mtls.name | string | `nil` | Name of the ConfigMap containing the CA to validate client certificates against. If mTLS is enabled and this is unspecified, it will default to the same CA used for Hubble metrics server certificates. | | hubble.peerService.clusterDomain | string | `"cluster.local"` | The cluster domain to use to query the Hubble Peer service. It should be the local cluster. | | hubble.peerService.targetPort | int | `4244` | Target Port for the Peer service, must match the hubble.listenAddress' port. | | hubble.preferIpv6 | bool | `false` | Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. | @@ -462,7 +483,7 @@ contributors across the globe, there is almost always someone available to help. 
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. | | hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay | | hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay | -| hubble.relay.image | object | `{"digest":"sha256:a0863dd70d081b273b87b9b7ce7e2d3f99171c2f5e202cd57bc6691e51283e0c","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.15.6","useDigest":true}` | Hubble-relay container image. | +| hubble.relay.image | object | `{"digest":"sha256:33fca7776fc3d7b2abe08873319353806dc1c5e07e12011d7da4da05f836ce8d","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.16.0","useDigest":true}` | Hubble-relay container image. | | hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. | | hubble.relay.listenPort | string | `"4245"` | Port to listen to. | | hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | @@ -492,7 +513,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.relay.service.nodePort | int | `31234` | - The port to use when the service type is set to NodePort. | | hubble.relay.service.type | string | `"ClusterIP"` | - The type of service used for Hubble Relay access, either ClusterIP or NodePort. | | hubble.relay.sortBufferDrainTimeout | string | `nil` | When the per-request flows sort buffer is not full, a flow is drained every time this timeout is reached (only affects requests in follow-mode) (e.g. "1s"). | -| hubble.relay.sortBufferLenMax | string | `nil` | Max number of flows that can be buffered for sorting before being sent to the client (per request) (e.g. 100). 
| +| hubble.relay.sortBufferLenMax | int | `nil` | Max number of flows that can be buffered for sorting before being sent to the client (per request) (e.g. 100). | | hubble.relay.terminationGracePeriodSeconds | int | `1` | Configure termination grace period for hubble relay Deployment. | | hubble.relay.tls | object | `{"client":{"cert":"","key":""},"server":{"cert":"","enabled":false,"extraDnsNames":[],"extraIpAddresses":[],"key":"","mtls":false,"relayName":"ui.hubble-relay.cilium.io"}}` | TLS configuration for Hubble Relay | | hubble.relay.tls.client | object | `{"cert":"","key":""}` | base64 encoded PEM values for the hubble-relay client certificate and private key This keypair is presented to Hubble server instances for mTLS authentication and is required when hubble.tls.enabled is true. These values need to be set manually if hubble.tls.auto.enabled is false. | @@ -520,7 +541,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. | | hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. | | hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. | -| hubble.ui.backend.image | object | `{"digest":"sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.0","useDigest":true}` | Hubble-ui backend image. | +| hubble.ui.backend.image | object | `{"digest":"sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.1","useDigest":true}` | Hubble-ui backend image. 
| | hubble.ui.backend.livenessProbe.enabled | bool | `false` | Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) | | hubble.ui.backend.readinessProbe.enabled | bool | `false` | Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) | | hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. | @@ -530,7 +551,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. | | hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. | | hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. | -| hubble.ui.frontend.image | object | `{"digest":"sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.0","useDigest":true}` | Hubble-ui frontend image. | +| hubble.ui.frontend.image | object | `{"digest":"sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.1","useDigest":true}` | Hubble-ui frontend image. | | hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. | | hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. | | hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 | @@ -557,23 +578,27 @@ contributors across the globe, there is almost always someone available to help. | hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. 
| | identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). | | identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. | -| image | object | `{"digest":"sha256:6aa840986a3a9722cd967ef63248d675a87add7e1704740902d5d3162f0c0def","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.6","useDigest":true}` | Agent container image. | -| imagePullSecrets | string | `nil` | Configure image pull secrets for pulling container images | +| image | object | `{"digest":"sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.16.0","useDigest":true}` | Agent container image. | +| imagePullSecrets | list | `[]` | Configure image pull secrets for pulling container images | | ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set | | ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. | | ingressController.defaultSecretNamespace | string | `nil` | Default secret namespace for ingresses without .spec.tls[].secretName set. | | ingressController.enableProxyProtocol | bool | `false` | Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. | | ingressController.enabled | bool | `false` | Enable cilium ingress controller This will automatically set enable-envoy-config as well. | | ingressController.enforceHttps | bool | `true` | Enforce https for host having matching TLS host in Ingress. Incoming traffic to http listener will return 308 http error code with respective location in header. 
| -| ingressController.ingressLBAnnotationPrefixes | list | `["service.beta.kubernetes.io","service.kubernetes.io","cloud.google.com"]` | IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service | -| ingressController.loadbalancerMode | string | `"dedicated"` | Default ingress load balancer mode Supported values: shared, dedicated For granular control, use the following annotations on the ingress resource ingress.cilium.io/loadbalancer-mode: shared|dedicated, | +| ingressController.hostNetwork.enabled | bool | `false` | Configure whether the Envoy listeners should be exposed on the host network. | +| ingressController.hostNetwork.nodes.matchLabels | object | `{}` | Specify the labels of the nodes where the Ingress listeners should be exposed matchLabels: kubernetes.io/os: linux kubernetes.io/hostname: kind-worker | +| ingressController.hostNetwork.sharedListenerPort | int | `8080` | Configure a specific port on the host network that gets used for the shared listener. | +| ingressController.ingressLBAnnotationPrefixes | list | `["lbipam.cilium.io","nodeipam.cilium.io","service.beta.kubernetes.io","service.kubernetes.io","cloud.google.com"]` | IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service | +| ingressController.loadbalancerMode | string | `"dedicated"` | Default ingress load balancer mode Supported values: shared, dedicated For granular control, use the following annotations on the ingress resource: "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). | | ingressController.secretsNamespace | object | `{"create":true,"name":"cilium-secrets","sync":true}` | SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. 
| | ingressController.secretsNamespace.create | bool | `true` | Create secrets namespace for Ingress. | | ingressController.secretsNamespace.name | string | `"cilium-secrets"` | Name of Ingress secret namespace. | | ingressController.secretsNamespace.sync | bool | `true` | Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. If disabled, TLS secrets must be maintained externally. | -| ingressController.service | object | `{"allocateLoadBalancerNodePorts":null,"annotations":{},"insecureNodePort":null,"labels":{},"loadBalancerClass":null,"loadBalancerIP":null,"name":"cilium-ingress","secureNodePort":null,"type":"LoadBalancer"}` | Load-balancer service in shared mode. This is a single load-balancer service for all Ingress resources. | +| ingressController.service | object | `{"allocateLoadBalancerNodePorts":null,"annotations":{},"externalTrafficPolicy":"Cluster","insecureNodePort":null,"labels":{},"loadBalancerClass":null,"loadBalancerIP":null,"name":"cilium-ingress","secureNodePort":null,"type":"LoadBalancer"}` | Load-balancer service in shared mode. This is a single load-balancer service for all Ingress resources. | | ingressController.service.allocateLoadBalancerNodePorts | string | `nil` | Configure if node port allocation is required for LB service ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation | | ingressController.service.annotations | object | `{}` | Annotations to be added for the shared LB service | +| ingressController.service.externalTrafficPolicy | string | `"Cluster"` | Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. Valid values are "Cluster" and "Local". 
ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy | | ingressController.service.insecureNodePort | string | `nil` | Configure a specific nodePort for insecure HTTP traffic on the shared LB service | | ingressController.service.labels | object | `{}` | Labels to be added for the shared LB service | | ingressController.service.loadBalancerClass | string | `nil` | Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) | @@ -591,18 +616,20 @@ contributors across the globe, there is almost always someone available to help. | ipam.operator.clusterPoolIPv4PodCIDRList | list | `["10.0.0.0/8"]` | IPv4 CIDR list range to delegate to individual nodes for IPAM. | | ipam.operator.clusterPoolIPv6MaskSize | int | `120` | IPv6 CIDR mask size to delegate to individual nodes for IPAM. | | ipam.operator.clusterPoolIPv6PodCIDRList | list | `["fd00::/104"]` | IPv6 CIDR list range to delegate to individual nodes for IPAM. | -| ipam.operator.externalAPILimitBurstSize | string | `20` | The maximum burst size when rate limiting access to external APIs. Also known as the token bucket capacity. | -| ipam.operator.externalAPILimitQPS | string | `4.0` | The maximum queries per second when rate limiting access to external APIs. Also known as the bucket refill rate, which is used to refill the bucket up to the burst size capacity. | +| ipam.operator.externalAPILimitBurstSize | int | `20` | The maximum burst size when rate limiting access to external APIs. Also known as the token bucket capacity. | +| ipam.operator.externalAPILimitQPS | float | `4.0` | The maximum queries per second when rate limiting access to external APIs. Also known as the bucket refill rate, which is used to refill the bucket up to the burst size capacity. | | ipv4.enabled | bool | `true` | Enable IPv4 support. | | ipv4NativeRoutingCIDR | string | `""` | Allows to explicitly specify the IPv4 CIDR for native routing. 
When specified, Cilium assumes networking for this CIDR is preconfigured and hands traffic destined for that range to the Linux network stack without applying any SNAT. Generally speaking, specifying a native routing CIDR implies that Cilium can depend on the underlying networking stack to route packets to their destination. To offer a concrete example, if Cilium is configured to use direct routing and the Kubernetes CIDR is included in the native routing CIDR, the user must configure the routes to reach pods, either manually or by setting the auto-direct-node-routes flag. | | ipv6.enabled | bool | `false` | Enable IPv6 support. | | ipv6NativeRoutingCIDR | string | `""` | Allows to explicitly specify the IPv6 CIDR for native routing. When specified, Cilium assumes networking for this CIDR is preconfigured and hands traffic destined for that range to the Linux network stack without applying any SNAT. Generally speaking, specifying a native routing CIDR implies that Cilium can depend on the underlying networking stack to route packets to their destination. To offer a concrete example, if Cilium is configured to use direct routing and the Kubernetes CIDR is included in the native routing CIDR, the user must configure the routes to reach pods, either manually or by setting the auto-direct-node-routes flag. 
| -| k8s | object | `{}` | Configure Kubernetes specific configuration | +| k8s | object | `{"requireIPv4PodCIDR":false,"requireIPv6PodCIDR":false}` | Configure Kubernetes specific configuration | +| k8s.requireIPv4PodCIDR | bool | `false` | requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR range via the Kubernetes node resource | +| k8s.requireIPv6PodCIDR | bool | `false` | requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR range via the Kubernetes node resource | | k8sClientRateLimit | object | `{"burst":null,"qps":null}` | Configure the client side rate limit for the agent and operator If the amount of requests to the Kubernetes API server exceeds the configured rate limit, the agent and operator will start to throttle requests by delaying them until there is budget or the request times out. | | k8sClientRateLimit.burst | int | 10 for k8s up to 1.26. 20 for k8s version 1.27+ | The burst request rate in requests per second. The rate limiter will allow short bursts with a higher rate. | | k8sClientRateLimit.qps | int | 5 for k8s up to 1.26. 10 for k8s version 1.27+ | The sustained request rate in requests per second. | | k8sNetworkPolicy.enabled | bool | `true` | Enable support for K8s NetworkPolicy | -| k8sServiceHost | string | `""` | Kubernetes service host | +| k8sServiceHost | string | `""` | Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only) | | k8sServicePort | string | `""` | Kubernetes service port | | keepDeprecatedLabels | bool | `false` | Keep the deprecated selector labels when deploying Cilium DaemonSet. | | keepDeprecatedProbes | bool | `false` | Keep the deprecated probes when deploying Cilium DaemonSet | @@ -632,13 +659,16 @@ contributors across the globe, there is almost always someone available to help. | name | string | `"cilium"` | Agent container name. 
| | nat46x64Gateway | object | `{"enabled":false}` | Configure standalone NAT46/NAT64 gateway | | nat46x64Gateway.enabled | bool | `false` | Enable RFC8215-prefixed translation | -| nodePort | object | `{"autoProtectPortRange":true,"bindProtection":true,"enableHealthCheck":true,"enableHealthCheckLoadBalancerIP":false,"enabled":false}` | Configure N-S k8s service loadbalancing | +| nodeIPAM.enabled | bool | `false` | Configure Node IPAM ref: https://docs.cilium.io/en/stable/network/node-ipam/ | +| nodePort | object | `{"addresses":null,"autoProtectPortRange":true,"bindProtection":true,"enableHealthCheck":true,"enableHealthCheckLoadBalancerIP":false,"enabled":false}` | Configure N-S k8s service loadbalancing | +| nodePort.addresses | string | `nil` | List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. Example: addresses: ["192.168.1.0/24", "2001::/64"] | | nodePort.autoProtectPortRange | bool | `true` | Append NodePort range to ip_local_reserved_ports if clash with ephemeral ports is detected. | | nodePort.bindProtection | bool | `true` | Set to true to prevent applications binding to service ports. | | nodePort.enableHealthCheck | bool | `true` | Enable healthcheck nodePort server for NodePort services | | nodePort.enableHealthCheckLoadBalancerIP | bool | `false` | Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs EnableHealthCheck to be enabled | | nodePort.enabled | bool | `false` | Enable the Cilium NodePort service implementation. | | nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node selector for cilium-agent. 
| +| nodeSelectorLabels | bool | `false` | Enable/Disable use of node label based identity | | nodeinit.affinity | object | `{}` | Affinity for cilium-nodeinit | | nodeinit.annotations | object | `{}` | Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit) | | nodeinit.bootstrapFile | string | `"/tmp/cilium-bootstrap.d/cilium-bootstrap-time"` | bootstrapFile is the location of the file where the bootstrap timestamp is written by the node-init DaemonSet | @@ -646,7 +676,7 @@ contributors across the globe, there is almost always someone available to help. | nodeinit.extraEnv | list | `[]` | Additional nodeinit environment variables. | | nodeinit.extraVolumeMounts | list | `[]` | Additional nodeinit volumeMounts. | | nodeinit.extraVolumes | list | `[]` | Additional nodeinit volumes. | -| nodeinit.image | object | `{"digest":"sha256:820155cb3b7f00c8d61c1cffa68c44440906cb046bdbad8ff544f5deb1103456","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/startup-script","tag":"19fb149fb3d5c7a37d3edfaf10a2be3ab7386661","useDigest":true}` | node-init image. | +| nodeinit.image | object | `{"digest":"sha256:8d7b41c4ca45860254b3c19e20210462ef89479bb6331d6760c4e609d651b29c","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/startup-script","tag":"c54c7edeab7fde4da68e59acd319ab24af242c3f","useDigest":true}` | node-init image. | | nodeinit.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for nodeinit pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | nodeinit.podAnnotations | object | `{}` | Annotations to be added to node-init pods. | | nodeinit.podLabels | object | `{}` | Labels to be added to node-init pods. | @@ -670,9 +700,10 @@ contributors across the globe, there is almost always someone available to help. | operator.extraHostPathMounts | list | `[]` | Additional cilium-operator hostPath mounts. 
| | operator.extraVolumeMounts | list | `[]` | Additional cilium-operator volumeMounts. | | operator.extraVolumes | list | `[]` | Additional cilium-operator volumes. | +| operator.hostNetwork | bool | `true` | HostNetwork setting | | operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. | | operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. | -| operator.image | object | `{"alibabacloudDigest":"sha256:7e1664bd18645b38fd41dc1c2decd334abeefe63d4d69bfbc65765806eb4a31f","awsDigest":"sha256:9656d44ee69817d156cc7d3797f92de2e534dfb991610c79c00e097b4dedd620","azureDigest":"sha256:386456c055c5d1380daf966d565fcafaed68467a4fe692679530764e3b56f170","genericDigest":"sha256:5789f0935eef96ad571e4f5565a8800d3a8fbb05265cf6909300cd82fd513c3d","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.15.6","useDigest":true}` | cilium-operator image. | +| operator.image | object | `{"alibabacloudDigest":"sha256:d2d9f450f2fc650d74d4b3935f4c05736e61145b9c6927520ea52e1ebcf4f3ea","awsDigest":"sha256:8dbe47a77ba8e1a5b111647a43db10c213d1c7dfc9f9aab5ef7279321ad21a2f","azureDigest":"sha256:dd7562e20bc72b55c65e2110eb98dca1dd2bbf6688b7d8cea2bc0453992c121d","genericDigest":"sha256:d6621c11c4e4943bf2998af7febe05be5ed6fdcf812b27ad4388f47022190316","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.16.0","useDigest":true}` | cilium-operator image. | | operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. 
| | operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods | @@ -700,7 +731,6 @@ contributors across the globe, there is almost always someone available to help. | operator.securityContext | object | `{}` | Security context to be added to cilium-operator pods | | operator.setNodeNetworkStatus | bool | `true` | Set Node condition NetworkUnavailable to 'false' with the reason 'CiliumIsUp' for nodes that have a healthy Cilium pod. | | operator.setNodeTaints | string | same as removeNodeTaints | Taint nodes where Cilium is scheduled but not running. This prevents pods from being scheduled to nodes where Cilium is not the default CNI provider. | -| operator.skipCNPStatusStartupClean | bool | `false` | Skip CNP node status clean up at operator startup. | | operator.skipCRDCreation | bool | `false` | Skip CRDs creation for cilium-operator | | operator.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for cilium-operator scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | operator.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for cilium-operator | @@ -723,7 +753,7 @@ contributors across the globe, there is almost always someone available to help. | preflight.extraEnv | list | `[]` | Additional preflight environment variables. | | preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. | | preflight.extraVolumes | list | `[]` | Additional preflight volumes. 
| -| preflight.image | object | `{"digest":"sha256:6aa840986a3a9722cd967ef63248d675a87add7e1704740902d5d3162f0c0def","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.6","useDigest":true}` | Cilium pre-flight image. | +| preflight.image | object | `{"digest":"sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.16.0","useDigest":true}` | Cilium pre-flight image. | | preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods | | preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | @@ -732,11 +762,13 @@ contributors across the globe, there is almost always someone available to help. | preflight.podLabels | object | `{}` | Labels to be added to the preflight pod. | | preflight.podSecurityContext | object | `{}` | Security context to be added to preflight pods. | | preflight.priorityClassName | string | `""` | The priority class to use for the preflight pod. | +| preflight.readinessProbe.initialDelaySeconds | int | `5` | For how long kubelet should wait before performing the first probe | +| preflight.readinessProbe.periodSeconds | int | `5` | interval between checks of the readiness probe | | preflight.resources | object | `{}` | preflight resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | preflight.securityContext | object | `{}` | Security context to be added to preflight pods | | preflight.terminationGracePeriodSeconds | int | `1` | Configure termination grace period for preflight Deployment and DaemonSet. 
| | preflight.tofqdnsPreCache | string | `""` | Path to write the `--tofqdns-pre-cache` file to. | -| preflight.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | Node tolerations for preflight scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | +| preflight.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for preflight scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | preflight.updateStrategy | object | `{"type":"RollingUpdate"}` | preflight update strategy | | preflight.validateCNPs | bool | `true` | By default we should always validate the installed CNPs before upgrading Cilium. This will make sure the user will have the policies deployed in the cluster with the right schema. | | priorityClassName | string | `""` | The priority class to use for cilium-agent. | @@ -751,14 +783,9 @@ contributors across the globe, there is almost always someone available to help. | prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-agent | | prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-agent | | prometheus.serviceMonitor.trustCRDsExist | bool | `false` | Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying | -| proxy | object | `{"prometheus":{"enabled":true,"port":null},"sidecarImageRegex":"cilium/istio_proxy"}` | Configure Istio proxy options. 
| -| proxy.prometheus.enabled | bool | `true` | Deprecated in favor of envoy.prometheus.enabled | -| proxy.prometheus.port | string | `nil` | Deprecated in favor of envoy.prometheus.port | -| proxy.sidecarImageRegex | string | `"cilium/istio_proxy"` | Regular expression matching compatible Istio sidecar istio-proxy container image names | | rbac.create | bool | `true` | Enable creation of Resource-Based Access Control configuration. | | readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe | | readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe | -| remoteNodeIdentity | bool | `true` | Enable use of the remote node identity. ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity Deprecated without replacement in 1.15. To be removed in 1.16. | | resourceQuotas | object | `{"cilium":{"hard":{"pods":"10k"}},"enabled":false,"operator":{"hard":{"pods":"15"}}}` | Enable resource quotas for priority classes used in the cluster. | | resources | object | `{}` | Agent resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | rollOutCiliumPods | bool | `false` | Roll out cilium agent pods automatically when configmap is updated. | @@ -783,6 +810,8 @@ contributors across the globe, there is almost always someone available to help. | startupProbe.periodSeconds | int | `2` | interval between checks of the startup probe | | svcSourceRangeCheck | bool | `true` | Enable check of service source ranges (currently, only for LoadBalancer). | | synchronizeK8sNodes | bool | `true` | Synchronize Kubernetes nodes to kvstore and perform CNP GC. | +| sysctlfix | object | `{"enabled":true}` | Configure sysctl override described in #20072. | +| sysctlfix.enabled | bool | `true` | Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. 
| | terminationGracePeriodSeconds | int | `1` | Configure termination grace period for cilium-agent DaemonSet. | | tls | object | `{"ca":{"cert":"","certValidityDuration":1095,"key":""},"caBundle":{"enabled":false,"key":"ca.crt","name":"cilium-root-ca.crt","useSecret":false},"secretsBackend":"local"}` | Configure TLS configuration in the agent. | | tls.ca | object | `{"cert":"","certValidityDuration":1095,"key":""}` | Base64 encoded PEM values for the CA certificate and private key. This can be used as common CA to generate certificates used by hubble and clustermesh components. It is neither required nor used when cert-manager is used to generate the certificates. | @@ -799,6 +828,7 @@ contributors across the globe, there is almost always someone available to help. | tunnelPort | int | Port 8472 for VXLAN, Port 6081 for Geneve | Configure VXLAN and Geneve tunnel port. | | tunnelProtocol | string | `"vxlan"` | Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. Possible values: - "" - vxlan - geneve | | updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":2},"type":"RollingUpdate"}` | Cilium agent update strategy | +| upgradeCompatibility | string | `nil` | upgradeCompatibility helps users upgrading to ensure that the configMap for Cilium will not change critical values to ensure continued operation This flag is not required for new installations. For example: '1.7', '1.8', '1.9' | | vtep.cidr | string | `""` | A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" | | vtep.enabled | bool | `false` | Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. 
| | vtep.endpoint | string | `""` | A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" | diff --git a/argocd-helm-charts/cilium/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json b/argocd-helm-charts/cilium/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json index 94af2eac3..a2ba01d8d 100644 --- a/argocd-helm-charts/cilium/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json +++ b/argocd-helm-charts/cilium/charts/cilium/files/cilium-agent/dashboards/cilium-dashboard.json @@ -4691,21 +4691,21 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(cilium_policy_l7_denied_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))", + "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"denied\"}[1m]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "denied", "refId": "A" }, { - "expr": "sum(rate(cilium_policy_l7_forwarded_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))", + "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"forwarded\"}[1m]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "forwarded", "refId": "B" }, { - "expr": "sum(rate(cilium_policy_l7_received_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))", + "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"received\"}[1m]))", "format": "time_series", "intervalFactor": 1, "legendFormat": "received", @@ -4857,7 +4857,7 @@ "aliasColors": { "Max per node processingTime": "#e24d42", "Max per node upstreamTime": "#58140c", - "avg(cilium_policy_l7_parse_errors_total{pod=~\"cilium.*\"})": "#bf1b00", + "avg(cilium_policy_l7_total{pod=~\"cilium.*\", rule=\"parse_errors\"})": "#bf1b00", "parse errors": "#bf1b00" }, "bars": true, @@ -4916,7 +4916,7 @@ "yaxis": 2 }, { - "alias": "avg(cilium_policy_l7_parse_errors_total{pod=~\"cilium.*\"})", + "alias": "avg(cilium_policy_l7_total{pod=~\"cilium.*\", rule=\"parse_errors\"})", "yaxis": 2 
}, { @@ -4937,7 +4937,7 @@ "refId": "A" }, { - "expr": "avg(cilium_policy_l7_parse_errors_total{k8s_app=\"cilium\", pod=~\"$pod\"}) by (pod)", + "expr": "avg(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"parse_errors\"}) by (pod)", "format": "time_series", "intervalFactor": 1, "legendFormat": "parse errors", @@ -5295,7 +5295,7 @@ "refId": "B" }, { - "expr": "max(rate(cilium_policy_l7_parse_errors_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m])) by (pod)", + "expr": "max(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"parse_errors\"}[1m])) by (pod)", "format": "time_series", "intervalFactor": 1, "legendFormat": "parse errors", diff --git a/argocd-helm-charts/cilium/charts/cilium/files/cilium-envoy/configmap/bootstrap-config.json b/argocd-helm-charts/cilium/charts/cilium/files/cilium-envoy/configmap/bootstrap-config.json index 87939f699..ebd329cdb 100644 --- a/argocd-helm-charts/cilium/charts/cilium/files/cilium-envoy/configmap/bootstrap-config.json +++ b/argocd-helm-charts/cilium/charts/cilium/files/cilium-envoy/configmap/bootstrap-config.json @@ -5,13 +5,13 @@ }, "staticResources": { "listeners": [ - {{- if and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled }} + {{- if .Values.envoy.prometheus.enabled }} { "name": "envoy-prometheus-metrics-listener", "address": { "socket_address": { "address": "0.0.0.0", - "port_value": {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} + "port_value": {{ .Values.envoy.prometheus.port }} } }, "filter_chains": [ @@ -60,6 +60,73 @@ ] }, {{- end }} + {{- if and .Values.envoy.debug.admin.enabled }} + { + "name": "envoy-admin-listener", + "address": { + "socket_address": { + "address": {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}, + "port_value": {{ .Values.envoy.debug.admin.port }} + } + }, + {{- if and .Values.ipv4.enabled .Values.ipv6.enabled }} + "additional_addresses": [ + { + "address": { + "socket_address": { + "address": "::1", + 
"port_value": {{ .Values.envoy.debug.admin.port }} + } + } + } + ], + {{- end }} + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "stat_prefix": "envoy-admin-listener", + "route_config": { + "virtual_hosts": [ + { + "name": "admin_route", + "domains": [ + "*" + ], + "routes": [ + { + "name": "admin_route", + "match": { + "prefix": "/" + }, + "route": { + "cluster": "/envoy-admin", + "prefix_rewrite": "/" + } + } + ] + } + ] + }, + "http_filters": [ + { + "name": "envoy.filters.http.router", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "stream_idle_timeout": "0s" + } + } + ] + } + ] + }, + {{- end }} { "name": "envoy-health-listener", "address": { diff --git a/argocd-helm-charts/cilium/charts/cilium/files/cilium-operator/dashboards/cilium-operator-dashboard.json b/argocd-helm-charts/cilium/charts/cilium/files/cilium-operator/dashboards/cilium-operator-dashboard.json index e677deae8..116c2c983 100644 --- a/argocd-helm-charts/cilium/charts/cilium/files/cilium-operator/dashboards/cilium-operator-dashboard.json +++ b/argocd-helm-charts/cilium/charts/cilium/files/cilium-operator/dashboards/cilium-operator-dashboard.json @@ -1,9 +1,22 @@ { + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], "annotations": { "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -25,7 +38,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { 
"defaults": { "custom": {} @@ -151,7 +167,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -281,7 +300,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -378,7 +400,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -475,7 +500,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -572,7 +600,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -669,7 +700,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -766,7 +800,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} @@ -863,7 +900,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fieldConfig": { "defaults": { "custom": {} diff --git a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dashboard.json b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dashboard.json index 
8de5ec1d0..783aa131c 100644 --- a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dashboard.json +++ b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dashboard.json @@ -1,4 +1,14 @@ { + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], "annotations": { "list": [ { @@ -36,7 +46,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -151,7 +164,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -237,7 +253,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -323,7 +342,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -422,7 +444,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -508,7 +533,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -594,7 +622,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -681,7 +712,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -773,7 +807,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -906,7 +943,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1014,7 +1054,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1139,7 +1182,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1247,7 +1293,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1367,7 +1416,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1462,7 +1514,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1548,7 +1603,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1648,7 +1706,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 4, @@ 
-1734,7 +1795,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 4, @@ -1820,7 +1884,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -1906,7 +1973,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2005,7 +2075,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 6, @@ -2092,7 +2165,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 6, @@ -2179,7 +2255,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2265,7 +2344,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2351,7 +2433,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2451,7 +2536,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2537,7 +2625,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2658,7 +2749,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2752,7 +2846,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2839,7 +2936,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -2926,7 +3026,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -3013,7 +3116,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 5, @@ -3103,7 +3209,10 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "prometheus", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "fill": 1, "gridPos": { "h": 6, @@ -3194,7 +3303,23 @@ "style": "dark", "tags": [], "templating": { - "list": [] + "list": [ + { + "current": {}, + "hide": 0, + "includeAll": false, + "label": "Prometheus", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] }, "time": { "from": "now-6h", diff --git a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json index d286fdb3a..57f804cf2 100644 --- 
a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json +++ b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-dns-namespace.json @@ -484,7 +484,7 @@ "includeAll": false, "label": "Data Source", "multi": false, - "name": "prometheus_datasource", + "name": "DS_PROMETHEUS", "options": [], "query": "prometheus", "queryValue": "", diff --git a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json index 428c58f43..b21004a69 100644 --- a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json +++ b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json @@ -1,5 +1,14 @@ { - "__inputs": [], + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], "__elements": {}, "__requires": [ { diff --git a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json index d0cf9d3b4..cddb473d7 100644 --- a/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json +++ b/argocd-helm-charts/cilium/charts/cilium/files/hubble/dashboards/hubble-network-overview-namespace.json @@ -883,7 +883,7 @@ "includeAll": false, "label": "Data Source", "multi": false, - "name": "prometheus_datasource", + "name": "DS_PROMETHEUS", "options": [], "query": "prometheus", "queryValue": "", diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/_extensions.tpl b/argocd-helm-charts/cilium/charts/cilium/templates/_extensions.tpl new file mode 100644 index 000000000..28168ff81 --- 
/dev/null +++ b/argocd-helm-charts/cilium/charts/cilium/templates/_extensions.tpl @@ -0,0 +1,50 @@ +{{/* +_extensions.tpl contains template blocks that are intended to allow packagers +to modify or extend the default chart behaviors. +*/}} + +{{/* +Intentionally empty to allow downstream chart packagers to add extra +containers to hubble-relay without having to modify the deployment manifest +directly. +*/}} +{{- define "hubble-relay.containers.extra" }} +{{- end }} + +{{/* +Allow packagers to add extra volumes to relay. +*/}} +{{- define "hubble-relay.volumes.extra" }} +{{- end }} + +{{/* +Allow packagers to modify how hubble-relay TLS is configured. + +A packager may want to change when TLS is enabled or prevent users from +disabling TLS. This means the template needs to allow overriding, not just +adding, which is why this template is not empty by default, like the ones +above. +*/}} +{{- define "hubble-relay.config.tls" }} +{{- if and .Values.hubble.tls.enabled .Values.hubble.relay.tls.server.enabled }} +tls-relay-server-cert-file: /var/lib/hubble-relay/tls/server.crt +tls-relay-server-key-file: /var/lib/hubble-relay/tls/server.key +{{- if .Values.hubble.relay.tls.server.mtls }} +tls-relay-client-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt +{{- end }} +{{- else }} +disable-server-tls: true +{{- end }} +{{- end }} + +{{- define "hubble-relay.config.listenAddress" -}} +{{- .Values.hubble.relay.listenHost }}:{{- include "hubble-relay.config.listenPort" . 
-}} +{{- end }} + +{{- define "hubble-relay.config.listenPort" -}} +{{- .Values.hubble.relay.listenPort }} +{{- end }} + +{{- define "hubble-relay.service.targetPort" -}} +grpc +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/_helpers.tpl b/argocd-helm-charts/cilium/charts/cilium/templates/_helpers.tpl index 3e5429e2a..bf52b3747 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/_helpers.tpl +++ b/argocd-helm-charts/cilium/charts/cilium/templates/_helpers.tpl @@ -43,62 +43,7 @@ where: {{- if $priorityClass }} {{- $priorityClass }} {{- else if and $root.Values.enableCriticalPriorityClass $criticalPriorityClass -}} - {{- if and (eq $root.Release.Namespace "kube-system") (semverCompare ">=1.10-0" $root.Capabilities.KubeVersion.Version) -}} - {{- $criticalPriorityClass }} - {{- else if semverCompare ">=1.17-0" $root.Capabilities.KubeVersion.Version -}} - {{- $criticalPriorityClass }} - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "ingress.apiVersion" -}} -{{- if semverCompare ">=1.16-0, <1.19-0" .Capabilities.KubeVersion.Version -}} -{{- print "networking.k8s.io/v1beta1" -}} -{{- else if semverCompare "^1.19-0" .Capabilities.KubeVersion.Version -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate backend for Hubble UI ingress. -*/}} -{{- define "ingress.paths" -}} -{{ if semverCompare ">=1.4-0, <1.19-0" .Capabilities.KubeVersion.Version -}} -backend: - serviceName: hubble-ui - servicePort: http -{{- else if semverCompare "^1.19-0" .Capabilities.KubeVersion.Version -}} -pathType: Prefix -backend: - service: - name: hubble-ui - port: - name: http -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for cronjob. 
-*/}} -{{- define "cronjob.apiVersion" -}} -{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version -}} -{{- print "batch/v1" -}} -{{- else -}} -{{- print "batch/v1beta1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for podDisruptionBudget. -*/}} -{{- define "podDisruptionBudget.apiVersion" -}} -{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.Version -}} -{{- print "policy/v1" -}} -{{- else -}} -{{- print "policy/v1beta1" -}} + {{- $criticalPriorityClass }} {{- end -}} {{- end -}} @@ -154,3 +99,61 @@ Validate duration field, return validated duration, 0s when provided duration is 0s {{- end }} {{- end }} + +{{/* +Convert a map to a comma-separated string: key1=value1,key2=value2 +*/}} +{{- define "mapToString" -}} +{{- $list := list -}} +{{- range $k, $v := . -}} +{{- $list = append $list (printf "%s=%s" $k $v) -}} +{{- end -}} +{{ join "," $list }} +{{- end -}} + +{{/* +Enable automatic lookup of k8sServiceHost from the cluster-info ConfigMap (kubeadm-based clusters only) +*/}} +{{- define "k8sServiceHost" }} + {{- if eq .Values.k8sServiceHost "auto" }} + {{- $configmap := (lookup "v1" "ConfigMap" "kube-public" "cluster-info") }} + {{- $kubeconfig := get $configmap.data "kubeconfig" }} + {{- $k8sServer := get ($kubeconfig | fromYaml) "clusters" | mustFirst | dig "cluster" "server" "" }} + {{- $uri := (split "https://" $k8sServer)._1 | trim }} + {{- (split ":" $uri)._0 | quote }} + {{- else }} + {{- .Values.k8sServiceHost | quote }} + {{- end }} +{{- end }} + +{{/* +Enable automatic lookup of k8sServicePort from the cluster-info ConfigMap (kubeadm-based clusters only) +*/}} +{{- define "k8sServicePort" }} + {{- if eq .Values.k8sServiceHost "auto" }} + {{- $configmap := (lookup "v1" "ConfigMap" "kube-public" "cluster-info") }} + {{- $kubeconfig := get $configmap.data "kubeconfig" }} + {{- $k8sServer := get ($kubeconfig | fromYaml) "clusters" | mustFirst | dig "cluster" "server" "" }} + {{- $uri := (split "https://" 
$k8sServer)._1 | trim }} + {{- (split ":" $uri)._1 | quote }} + {{- else }} + {{- .Values.k8sServicePort | quote }} + {{- end }} +{{- end }} + +{{/* +Return user specify envoy.enabled or default value based on the upgradeCompatibility +*/}} +{{- define "envoyDaemonSetEnabled" }} + {{- if not .Values.l7Proxy }} + {{- false }} + {{- else if (not (kindIs "invalid" .Values.envoy.enabled)) }} + {{- .Values.envoy.enabled }} + {{- else }} + {{- if semverCompare ">=1.16" (default "1.16" .Values.upgradeCompatibility) }} + {{- true }} + {{- else }} + {{- false }} + {{- end }} + {{- end }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrole.yaml index 9ccf21926..900ddd87f 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.agent (not .Values.preflight.enabled) }} +{{- if and .Values.agent (not .Values.preflight.enabled) .Values.rbac.create }} {{- /* Keep file in sync with cilium-preflight/clusterrole.yaml */ -}} @@ -41,6 +41,15 @@ rules: - get - list - watch +{{- if and .Values.hubble.enabled .Values.hubble.dropEventEmitter.enabled }} +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- end }} {{- if .Values.annotateK8sNode }} - apiGroups: - "" @@ -139,8 +148,6 @@ rules: - apiGroups: - cilium.io resources: - - ciliumnetworkpolicies/status - - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints - ciliuml2announcementpolicies/status diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrolebinding.yaml index 34b0a58b8..f05729ad8 100644 --- 
a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.agent (not .Values.preflight.enabled) .Values.serviceAccounts.cilium.create }} +{{- if and .Values.agent (not .Values.preflight.enabled) .Values.serviceAccounts.cilium.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/daemonset.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/daemonset.yaml index 2949091a8..474acc181 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/daemonset.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/daemonset.yaml @@ -9,6 +9,7 @@ {{- end -}} {{- $kubeProxyReplacement := (coalesce .Values.kubeProxyReplacement "false") -}} +{{- $envoyDS := eq (include "envoyDaemonSetEnabled" .) "true" -}} --- apiVersion: apps/v1 @@ -122,7 +123,6 @@ spec: {{- with .Values.extraArgs }} {{- toYaml . | trim | nindent 8 }} {{- end }} - {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} startupProbe: httpGet: host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} @@ -136,7 +136,6 @@ spec: periodSeconds: {{ .Values.startupProbe.periodSeconds }} successThreshold: 1 initialDelaySeconds: 5 - {{- end }} livenessProbe: {{- if or .Values.keepDeprecatedProbes $defaultKeepDeprecatedProbes }} exec: @@ -154,14 +153,6 @@ spec: - name: "brief" value: "true" {{- end }} - {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} - # The initial delay for the liveness probe is intentionally large to - # avoid an endless kill & restart cycle if in the event that the initial - # bootstrapping takes longer than expected. - # Starting from Kubernetes 1.20, we are using startupProbe instead - # of this field. 
- initialDelaySeconds: 120 - {{- end }} periodSeconds: {{ .Values.livenessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.livenessProbe.failureThreshold }} @@ -183,9 +174,6 @@ spec: - name: "brief" value: "true" {{- end }} - {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} - initialDelaySeconds: 5 - {{- end }} periodSeconds: {{ .Values.readinessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.readinessProbe.failureThreshold }} @@ -211,11 +199,9 @@ spec: divisor: '1' {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST - value: {{ .Values.k8sServiceHost | quote }} - {{- end }} - {{- if .Values.k8sServicePort }} + value: {{ include "k8sServiceHost" . }} - name: KUBERNETES_SERVICE_PORT - value: {{ .Values.k8sServicePort | quote }} + value: {{ include "k8sServicePort" . }} {{- end }} {{- with .Values.extraEnv }} {{- toYaml . | trim | nindent 8 }} @@ -251,10 +237,16 @@ spec: containerPort: {{ .Values.prometheus.port }} hostPort: {{ .Values.prometheus.port }} protocol: TCP - {{- if and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled (not .Values.envoy.enabled) }} + {{- if and .Values.envoy.prometheus.enabled (not $envoyDS) }} - name: envoy-metrics - containerPort: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} - hostPort: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} + containerPort: {{ .Values.envoy.prometheus.port }} + hostPort: {{ .Values.envoy.prometheus.port }} + protocol: TCP + {{- end }} + {{- if and .Values.envoy.debug.admin.port (not $envoyDS) }} + - name: envoy-admin + containerPort: {{ .Values.envoy.debug.admin.port }} + hostPort: {{ .Values.envoy.debug.admin.port }} protocol: TCP {{- end }} {{- end }} @@ -288,7 +280,7 @@ spec: mountPath: {{ dir .Values.authentication.mutual.spire.adminSocketPath }} readOnly: false {{- end }} - {{- if .Values.envoy.enabled }} + {{- if $envoyDS }} - name: envoy-sockets 
mountPath: /var/run/cilium/envoy/sockets readOnly: false @@ -303,8 +295,7 @@ spec: - mountPath: /host/proc/sys/kernel name: host-proc-sys-kernel {{- end}} - {{- /* CRI-O already mounts the BPF filesystem */ -}} - {{- if and .Values.bpf.autoMount.enabled (not (eq .Values.containerRuntime.integration "crio")) }} + {{- if .Values.bpf.autoMount.enabled }} - name: bpf-maps mountPath: /sys/fs/bpf {{- if .Values.securityContext.privileged }} @@ -324,13 +315,20 @@ spec: {{- end}} - name: cilium-run mountPath: /var/run/cilium + {{- /* mount the directory if socketLB.enabled is true and socketLB.terminatePodConnections is not explicitly set to false */ -}} + {{- if or (and (kindIs "invalid" .Values.socketLB.terminatePodConnections) .Values.socketLB.enabled) + (and .Values.socketLB.enabled .Values.socketLB.terminatePodConnections) }} + - name: cilium-netns + mountPath: /var/run/cilium/netns + mountPropagation: HostToContainer + {{- end}} - name: etc-cni-netd mountPath: {{ .Values.cni.hostConfDirMountPath }} {{- if .Values.etcd.enabled }} - name: etcd-config-path mountPath: /var/lib/etcd-config readOnly: true - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} - name: etcd-secrets mountPath: /var/lib/etcd-secrets readOnly: true @@ -357,7 +355,7 @@ spec: mountPath: /run/xtables.lock {{- if and .Values.encryption.enabled (eq .Values.encryption.type "ipsec") }} - name: cilium-ipsec-secrets - mountPath: {{ .Values.encryption.ipsec.mountPath | default .Values.encryption.mountPath }} + mountPath: {{ .Values.encryption.ipsec.mountPath }} {{- end }} {{- if .Values.kubeConfigPath }} - name: kube-config @@ -369,6 +367,11 @@ spec: mountPath: /var/lib/cilium/bgp readOnly: true {{- end }} + {{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }} + - name: hubble-metrics-tls + mountPath: /var/lib/cilium/tls/hubble-metrics + readOnly: true + {{- end }} {{- if and .Values.hubble.enabled .Values.hubble.tls.enabled 
(hasKey .Values.hubble "listenAddress") }} - name: hubble-tls mountPath: /var/lib/cilium/tls/hubble @@ -456,11 +459,9 @@ spec: fieldPath: metadata.namespace {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST - value: {{ .Values.k8sServiceHost | quote }} - {{- end }} - {{- if .Values.k8sServicePort }} + value: {{ include "k8sServiceHost" . }} - name: KUBERNETES_SERVICE_PORT - value: {{ .Values.k8sServicePort | quote }} + value: {{ include "k8sServicePort" . }} {{- end }} {{- with .Values.extraEnv }} {{- toYaml . | nindent 8 }} @@ -526,6 +527,8 @@ spec: drop: - ALL {{- end}} + {{- end }} + {{- if .Values.sysctlfix.enabled }} - name: apply-sysctl-overwrites image: {{ include "cilium.image" .Values.image | quote }} imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -591,8 +594,7 @@ spec: terminationMessagePolicy: FallbackToLogsOnError securityContext: privileged: true - {{- /* CRI-O already mounts the BPF filesystem */ -}} - {{- if and .Values.bpf.autoMount.enabled (not (eq .Values.containerRuntime.integration "crio")) }} + {{- if and .Values.bpf.autoMount.enabled }} volumeMounts: - name: bpf-maps mountPath: /sys/fs/bpf @@ -646,11 +648,9 @@ spec: optional: true {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST - value: {{ .Values.k8sServiceHost | quote }} - {{- end }} - {{- if .Values.k8sServicePort }} + value: {{ include "k8sServiceHost" . }} - name: KUBERNETES_SERVICE_PORT - value: {{ .Values.k8sServicePort | quote }} + value: {{ include "k8sServicePort" . }} {{- end }} {{- with .Values.extraEnv }} {{- toYaml . 
| nindent 8 }} @@ -673,8 +673,7 @@ spec: - ALL {{- end}} volumeMounts: - {{- /* CRI-O already mounts the BPF filesystem */ -}} - {{- if and .Values.bpf.autoMount.enabled (not (eq .Values.containerRuntime.integration "crio")) }} + {{- if .Values.bpf.autoMount.enabled}} - name: bpf-maps mountPath: /sys/fs/bpf {{- end }} @@ -756,18 +755,16 @@ spec: - name: cni-path mountPath: /host/opt/cni/bin {{- end }} # .Values.cni.install + {{- if .Values.extraInitContainers }} + {{- toYaml .Values.extraInitContainers | nindent 6 }} + {{- end }} restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.priorityClassName "system-node-critical") }} - serviceAccount: {{ .Values.serviceAccounts.cilium.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.cilium.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.cilium.automount }} terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} hostNetwork: true - {{- if and .Values.etcd.managed (not .Values.etcd.k8sService) }} - # In managed etcd mode, Cilium must be able to resolve the DNS name of - # the etcd service - dnsPolicy: ClusterFirstWithHostNet - {{- else if .Values.dnsPolicy }} + {{- if .Values.dnsPolicy }} dnsPolicy: {{ .Values.dnsPolicy }} {{- end }} {{- with .Values.affinity }} @@ -800,16 +797,23 @@ spec: hostPath: path: {{ .Values.daemon.runPath }} type: DirectoryOrCreate - {{- /* CRI-O already mounts the BPF filesystem */ -}} - {{- if and .Values.bpf.autoMount.enabled (not (eq .Values.containerRuntime.integration "crio")) }} + {{- if or (and (kindIs "invalid" .Values.socketLB.terminatePodConnections) .Values.socketLB.enabled) + (and .Values.socketLB.enabled .Values.socketLB.terminatePodConnections) }} + # To exec into pod network namespaces + - name: cilium-netns + hostPath: + path: /var/run/netns + type: DirectoryOrCreate + {{- end }} + {{- if .Values.bpf.autoMount.enabled }} # To keep state between restarts / upgrades for bpf maps - name: 
bpf-maps hostPath: path: /sys/fs/bpf type: DirectoryOrCreate {{- end }} - {{- if .Values.cgroup.autoMount.enabled }} - # To mount cgroup2 filesystem on the host + {{- if or .Values.cgroup.autoMount.enabled .Values.sysctlfix.enabled }} + # To mount cgroup2 filesystem on the host or apply sysctlfix - name: hostproc hostPath: path: /proc @@ -845,7 +849,7 @@ spec: path: {{ dir .Values.authentication.mutual.spire.adminSocketPath }} type: DirectoryOrCreate {{- end }} - {{- if .Values.envoy.enabled }} + {{- if $envoyDS }} # Sharing socket with Cilium Envoy on the same node by using a host path - name: envoy-sockets hostPath: @@ -875,7 +879,7 @@ spec: - key: etcd-config path: etcd.config # To read the k8s etcd secrets in case the user might want to use TLS - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} - name: etcd-secrets secret: secretName: cilium-etcd-secrets @@ -916,6 +920,29 @@ spec: - key: {{ .Values.tls.caBundle.key }} path: common-etcd-client-ca.crt {{- end }} + # note: we configure the volume for the kvstoremesh-specific certificate + # regardless of whether KVStoreMesh is enabled or not, so that it can be + # automatically mounted in case KVStoreMesh gets subsequently enabled, + # without requiring an agent restart. 
+ - secret: + name: clustermesh-apiserver-local-cert + optional: true + items: + - key: tls.key + path: local-etcd-client.key + - key: tls.crt + path: local-etcd-client.crt + {{- if not .Values.tls.caBundle.enabled }} + - key: ca.crt + path: local-etcd-client-ca.crt + {{- else }} + - {{ .Values.tls.caBundle.useSecret | ternary "secret" "configMap" }}: + name: {{ .Values.tls.caBundle.name }} + optional: true + items: + - key: {{ .Values.tls.caBundle.key }} + path: local-etcd-client-ca.crt + {{- end }} {{- if and .Values.ipMasqAgent .Values.ipMasqAgent.enabled }} - name: ip-masq-agent configMap: @@ -928,7 +955,7 @@ spec: {{- if and .Values.encryption.enabled (eq .Values.encryption.type "ipsec") }} - name: cilium-ipsec-secrets secret: - secretName: {{ .Values.encryption.ipsec.secretName | default .Values.encryption.secretName }} + secretName: {{ .Values.encryption.ipsec.secretName }} {{- end }} {{- if .Values.cni.configMap }} - name: cni-configuration @@ -976,6 +1003,45 @@ spec: path: client-ca.crt {{- end }} {{- end }} + {{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }} + - name: hubble-metrics-tls + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: hubble-metrics-server-certs + optional: true + items: + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key + {{- if .Values.hubble.metrics.tls.server.mtls.enabled }} + {{- if .Values.hubble.metrics.tls.server.mtls.name }} + {{/* Use the client CA specified by the user if they configured one */}} + - {{ .Values.hubble.metrics.tls.server.mtls.useSecret | ternary "secret" "configMap" }}: + name: {{ .Values.hubble.metrics.tls.server.mtls.name }} + optional: false + items: + - key: {{ .Values.hubble.metrics.tls.server.mtls.key }} + path: client-ca.crt + {{/* If the CA bundle isn't configured use the server CA as the client CA */}} + {{- else if not 
.Values.tls.caBundle.enabled }} + - key: ca.crt + path: client-ca.crt + {{/* Fall back to the caBundle CA if it's been configured */}} + {{- else }} + - {{ .Values.tls.caBundle.useSecret | ternary "secret" "configMap" }}: + name: {{ .Values.tls.caBundle.name }} + optional: true + items: + - key: {{ .Values.tls.caBundle.key }} + path: client-ca.crt + {{- end }} + {{- end }} + + {{- end }} {{- if .Values.hubble.export.dynamic.enabled }} - name: hubble-flowlog-config configMap: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/service.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/service.yaml index f6ec4951f..df97f5ca1 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/service.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-agent/service.yaml @@ -1,3 +1,4 @@ +{{- $envoyDS := eq (include "envoyDaemonSetEnabled" .) "true" -}} {{- if and .Values.agent (not .Values.preflight.enabled) .Values.prometheus.enabled }} {{- if .Values.prometheus.serviceMonitor.enabled }} apiVersion: v1 @@ -23,13 +24,13 @@ spec: port: {{ .Values.prometheus.port }} protocol: TCP targetPort: prometheus - {{- if not .Values.envoy.enabled }} + {{- if not $envoyDS }} - name: envoy-metrics - port: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} + port: {{ .Values.envoy.prometheus.port }} protocol: TCP targetPort: envoy-metrics {{- end }} -{{- else if and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled (not .Values.envoy.enabled) }} +{{- else if and .Values.envoy.prometheus.enabled (not $envoyDS) }} apiVersion: v1 kind: Service metadata: @@ -37,7 +38,7 @@ metadata: namespace: {{ .Release.Namespace }} annotations: prometheus.io/scrape: "true" - prometheus.io/port: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port | quote }} + prometheus.io/port: {{ .Values.envoy.prometheus.port | quote }} labels: k8s-app: cilium app.kubernetes.io/name: 
cilium-agent @@ -49,7 +50,7 @@ spec: k8s-app: cilium ports: - name: envoy-metrics - port: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} + port: {{ .Values.envoy.prometheus.port }} protocol: TCP targetPort: envoy-metrics {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-configmap.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-configmap.yaml index 9531ca18a..946e60009 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-configmap.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-configmap.yaml @@ -1,4 +1,4 @@ -{{- if and (.Values.agent) (not .Values.preflight.enabled) }} +{{- if and ( or (.Values.agent) (.Values.operator.enabled) .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) (not .Values.preflight.enabled) }} {{- /* Default values with backwards compatibility */ -}} {{- $defaultBpfMapDynamicSizeRatio := 0.0 -}} {{- $defaultBpfMasquerade := "false" -}} @@ -15,6 +15,7 @@ {{- $defaultK8sClientQPS := 5 -}} {{- $defaultK8sClientBurst := 10 -}} {{- $defaultDNSProxyEnableTransparentMode := "false" -}} +{{- $envoyDS := eq (include "envoyDaemonSetEnabled" .) "true" -}} {{- /* Default values when 1.8 was initially deployed */ -}} {{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}} @@ -66,8 +67,8 @@ {{- $stringValueKPR = "" -}} {{- end}} {{- $kubeProxyReplacement := (coalesce $stringValueKPR $defaultKubeProxyReplacement) -}} -{{- if and (ne $kubeProxyReplacement "disabled") (ne $kubeProxyReplacement "partial") (ne $kubeProxyReplacement "strict") (ne $kubeProxyReplacement "true") (ne $kubeProxyReplacement "false") }} - {{ fail "kubeProxyReplacement must be explicitly set to a valid value (true, false, disabled (deprecated), partial (deprecated), or strict (deprecated)) to continue." 
}} +{{- if and (ne $kubeProxyReplacement "true") (ne $kubeProxyReplacement "false") }} + {{ fail "kubeProxyReplacement must be explicitly set to a valid value (true or false) to continue." }} {{- end }} {{- $azureUsePrimaryAddress = (coalesce .Values.azure.usePrimaryAddress $azureUsePrimaryAddress) -}} {{- $socketLB := (coalesce .Values.socketLB .Values.hostServices) -}} @@ -92,28 +93,19 @@ metadata: data: {{- if .Values.etcd.enabled }} # The kvstore configuration is used to enable use of a kvstore for state - # storage. This can either be provided with an external kvstore or with the - # help of cilium-etcd-operator which operates an etcd cluster automatically. + # storage. This can be provided with an external kvstore. kvstore: etcd - {{- if .Values.etcd.k8sService }} - kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config", "etcd.operator": "true"}' - {{- else }} kvstore-opt: '{"etcd.config": "/var/lib/etcd-config/etcd.config"}' - {{- end }} # This etcd-config contains the etcd endpoints of your cluster. If you use # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config etcd-config: |- --- endpoints: - {{- if .Values.etcd.managed }} - - https://cilium-etcd-client.{{ .Release.Namespace }}.svc:2379 - {{- else }} {{- range .Values.etcd.endpoints }} - {{ . 
}} {{- end }} - {{- end }} - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} trusted-ca-file: '/var/lib/etcd-secrets/etcd-client-ca.crt' key-file: '/var/lib/etcd-secrets/etcd-client.key' cert-file: '/var/lib/etcd-secrets/etcd-client.crt' @@ -148,10 +140,6 @@ data: cilium-endpoint-gc-interval: {{ include "validateDuration" .Values.operator.endpointGCInterval | quote }} nodes-gc-interval: {{ include "validateDuration" .Values.operator.nodeGCInterval | quote }} -{{- if hasKey .Values.operator "skipCNPStatusStartupClean" }} - skip-cnp-status-startup-clean: "{{ .Values.operator.skipCNPStatusStartupClean }}" -{{- end }} - {{- if eq .Values.disableEndpointCRD true }} # Disable the usage of CiliumEndpoint CRD disable-endpoint-crd: "true" @@ -226,11 +214,15 @@ data: {{- end }} {{- end }} -{{- if not .Values.envoy.enabled }} +{{- if not $envoyDS }} # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this # field is not set. - {{- if and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled }} - proxy-prometheus-port: "{{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }}" + {{- if .Values.envoy.prometheus.enabled }} + proxy-prometheus-port: "{{ .Values.envoy.prometheus.port }}" + {{- end }} + + {{- if and .Values.envoy.debug.admin.enabled .Values.envoy.debug.admin.port }} + proxy-admin-port: "{{ .Values.envoy.debug.admin.port }}" {{- end }} {{- end }} @@ -249,6 +241,7 @@ data: {{- if or .Values.envoyConfig.enabled .Values.ingressController.enabled .Values.gatewayAPI.enabled (and (hasKey .Values "loadBalancer") (eq .Values.loadBalancer.l7.backend "envoy")) }} enable-envoy-config: "true" + envoy-config-retry-interval: {{ include "validateDuration" .Values.envoyConfig.retryInterval | quote }} {{- if .Values.envoyConfig.enabled }} envoy-secrets-namespace: {{ .Values.envoyConfig.secretsNamespace.name | quote }} {{- end }} @@ -267,12 +260,22 @@ data: 
ingress-default-secret-namespace: {{ .Values.ingressController.defaultSecretNamespace | quote }} ingress-default-secret-name: {{ .Values.ingressController.defaultSecretName | quote }} {{- end }} + ingress-hostnetwork-enabled: {{ .Values.ingressController.hostNetwork.enabled | quote }} + ingress-hostnetwork-shared-listener-port: {{ .Values.ingressController.hostNetwork.sharedListenerPort | quote }} + ingress-hostnetwork-nodelabelselector: {{ include "mapToString" .Values.ingressController.hostNetwork.nodes.matchLabels | quote }} {{- end }} {{- if .Values.gatewayAPI.enabled }} enable-gateway-api: "true" enable-gateway-api-secrets-sync: {{ .Values.gatewayAPI.secretsNamespace.sync | quote }} + enable-gateway-api-proxy-protocol: {{ .Values.gatewayAPI.enableProxyProtocol | quote }} + enable-gateway-api-app-protocol: {{ or .Values.gatewayAPI.enableAppProtocol .Values.gatewayAPI.enableAlpn | quote }} + enable-gateway-api-alpn: {{ .Values.gatewayAPI.enableAlpn | quote }} + gateway-api-xff-num-trusted-hops: {{ .Values.gatewayAPI.xffNumTrustedHops | quote }} + gateway-api-service-externaltrafficpolicy: {{ .Values.gatewayAPI.externalTrafficPolicy | quote }} gateway-api-secrets-namespace: {{ .Values.gatewayAPI.secretsNamespace.name | quote }} + gateway-api-hostnetwork-enabled: {{ .Values.gatewayAPI.hostNetwork.enabled | quote }} + gateway-api-hostnetwork-nodelabelselector: {{ include "mapToString" .Values.gatewayAPI.hostNetwork.nodes.matchLabels | quote }} {{- end }} {{- if hasKey .Values "loadBalancer" }} @@ -419,6 +422,10 @@ data: bpf-lb-external-clusterip: {{ .Values.bpf.lbExternalClusterIP | quote }} {{- end }} + bpf-events-drop-enabled: {{ .Values.bpf.events.drop.enabled | quote }} + bpf-events-policy-verdict-enabled: {{ .Values.bpf.events.policyVerdict.enabled | quote }} + bpf-events-trace-enabled: {{ .Values.bpf.events.trace.enabled | quote }} + # Pre-allocation of map entries allows per-packet latency to be reduced, at # the expense of up-front memory allocation for the 
entries in the maps. The # default value below will minimize memory usage in the default installation; @@ -436,10 +443,6 @@ data: # 1.4 or later, then it may cause one-time disruptions during the upgrade. preallocate-bpf-maps: "{{ .Values.bpf.preallocateMaps }}" - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "{{ .Values.proxy.sidecarImageRegex }}" - # Name of the cluster. Only relevant when building a mesh of clusters. cluster-name: {{ .Values.cluster.name }} @@ -491,7 +494,9 @@ data: {{- end }} {{- if .Values.eni.enabled }} + {{- if not .Values.endpointRoutes.enabled }} enable-endpoint-routes: "true" + {{- end }} auto-create-cilium-node-resource: "true" {{- if .Values.eni.updateEC2AdapterLimitViaAPI }} update-ec2-adapter-limit-via-api: "true" @@ -569,6 +574,14 @@ data: enable-ipv6-big-tcp: {{ .Values.enableIPv6BIGTCP | quote }} enable-ipv6-masquerade: {{ .Values.enableIPv6Masquerade | quote }} +{{- if hasKey .Values.bpf "enableTCX" }} + enable-tcx: {{ .Values.bpf.enableTCX | quote }} +{{- end }} + +{{- if hasKey .Values.bpf "datapathMode" }} + datapath-mode: {{ .Values.bpf.datapathMode | quote }} +{{- end }} + {{- if (not (kindIs "invalid" .Values.bpf.masquerade)) }} enable-bpf-masquerade: {{ .Values.bpf.masquerade | quote }} {{- else if eq $defaultBpfMasquerade "true" }} @@ -588,13 +601,9 @@ data: {{- if and .Values.encryption.ipsec.mountPath .Values.encryption.ipsec.keyFile }} ipsec-key-file: {{ .Values.encryption.ipsec.mountPath }}/{{ .Values.encryption.ipsec.keyFile }} - {{- else }} - ipsec-key-file: {{ .Values.encryption.mountPath }}/{{ .Values.encryption.keyFile }} {{- end }} {{- if .Values.encryption.ipsec.interface }} encrypt-interface: {{ .Values.encryption.ipsec.interface }} - {{- else if .Values.encryption.interface }} - encrypt-interface: {{ .Values.encryption.interface }} {{- end }} {{- if hasKey .Values.encryption.ipsec "keyWatcher" }} enable-ipsec-key-watcher: {{ 
.Values.encryption.ipsec.keyWatcher | quote }} @@ -602,6 +611,7 @@ data: {{- if .Values.encryption.ipsec.keyRotationDuration }} ipsec-key-rotation-duration: {{ include "validateDuration" .Values.encryption.ipsec.keyRotationDuration | quote }} {{- end }} + enable-ipsec-encrypted-overlay: {{ .Values.encryption.ipsec.encryptedOverlay | quote }} {{- else if eq .Values.encryption.type "wireguard" }} enable-wireguard: {{ .Values.encryption.enabled | quote }} {{- if .Values.encryption.wireguard.userspaceFallback }} @@ -640,6 +650,7 @@ data: {{- end }} auto-direct-node-routes: {{ .Values.autoDirectNodeRoutes | quote }} + direct-routing-skip-unreachable: {{ .Values.directRoutingSkipUnreachable | quote }} {{- if hasKey .Values "bandwidthManager" }} {{- if .Values.bandwidthManager.enabled }} @@ -688,6 +699,10 @@ data: enable-runtime-device-detection: "true" {{- end }} +{{- if .Values.forceDeviceDetection }} + force-device-detection: "true" +{{- end }} + kube-proxy-replacement: {{ $kubeProxyReplacement | quote }} {{- if ne $kubeProxyReplacement "disabled" }} @@ -697,10 +712,14 @@ data: {{- if $socketLB }} {{- if hasKey $socketLB "enabled" }} bpf-lb-sock: {{ $socketLB.enabled | quote }} + bpf-lb-sock-terminate-pod-connections: {{ $socketLB.enabled | quote }} {{- end }} {{- if hasKey $socketLB "hostNamespaceOnly" }} bpf-lb-sock-hostns-only: {{ $socketLB.hostNamespaceOnly | quote }} {{- end }} +{{- if hasKey $socketLB "terminatePodConnections" }} + bpf-lb-sock-terminate-pod-connections: {{ $socketLB.terminatePodConnections | quote }} +{{- end }} {{- end }} {{- if hasKey .Values "hostPort" }} @@ -720,6 +739,9 @@ data: {{- if hasKey .Values.nodePort "range" }} node-port-range: {{ get .Values.nodePort "range" | quote }} {{- end }} +{{- if hasKey .Values.nodePort "addresses" }} + nodeport-addresses: {{ get .Values.nodePort "addresses" | join "," | quote }} +{{- end }} {{- if hasKey .Values.nodePort "directRoutingDevice" }} direct-routing-device: {{ 
.Values.nodePort.directRoutingDevice | quote }} {{- end }} @@ -799,13 +821,10 @@ data: {{- if hasKey .Values.k8s "requireIPv6PodCIDR" }} k8s-require-ipv6-pod-cidr: {{ .Values.k8s.requireIPv6PodCIDR | quote }} {{- end }} -{{- if .Values.endpointStatus.enabled }} - endpoint-status: {{ required "endpointStatus.status required: policy, health, controllers, log and / or state. For 2 or more options use a space: \"policy health\"" .Values.endpointStatus.status | quote }} -{{- end }} {{- if and .Values.endpointRoutes .Values.endpointRoutes.enabled }} enable-endpoint-routes: {{ .Values.endpointRoutes.enabled | quote }} {{- end }} -{{- if and .Values.k8sNetworkPolicy .Values.k8sNetworkPolicy.enabled }} +{{- if hasKey .Values.k8sNetworkPolicy "enabled" }} enable-k8s-networkpolicy: {{ .Values.k8sNetworkPolicy.enabled | quote }} {{- end }} {{- if .Values.cni.configMap }} @@ -832,6 +851,9 @@ data: {{- if (not (kindIs "invalid" .Values.cni.externalRouting)) }} cni-external-routing: {{ .Values.cni.externalRouting | quote }} {{- end}} +{{- if .Values.cni.enableRouteMTUForCNIChaining }} + enable-route-mtu-for-cni-chaining: {{ .Values.cni.enableRouteMTUForCNIChaining | quote }} +{{- end }} {{- if .Values.kubeConfigPath }} k8s-kubeconfig-path: {{ .Values.kubeConfigPath | quote }} {{- end }} @@ -844,12 +866,12 @@ data: {{- if hasKey .Values "healthChecking" }} enable-health-checking: {{ .Values.healthChecking | quote }} {{- end }} -{{- if or .Values.wellKnownIdentities.enabled .Values.etcd.managed }} +{{- if .Values.wellKnownIdentities.enabled }} enable-well-known-identities: "true" {{- else }} enable-well-known-identities: "false" {{- end }} - enable-remote-node-identity: {{ .Values.remoteNodeIdentity | quote }} + enable-node-selector-labels: {{ .Values.nodeSelectorLabels | quote }} {{- if hasKey .Values "synchronizeK8sNodes" }} synchronize-k8s-nodes: {{ .Values.synchronizeK8sNodes | quote }} @@ -881,6 +903,14 @@ data: # Address to expose Hubble metrics (e.g. ":7070"). 
Metrics server will be disabled if this # field is not set. hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}" + hubble-metrics-server-enable-tls: "{{ .Values.hubble.metrics.tls.enabled }}" + {{- if .Values.hubble.metrics.tls.enabled }} + hubble-metrics-server-tls-cert-file: /var/lib/cilium/tls/hubble-metrics/server.crt + hubble-metrics-server-tls-key-file: /var/lib/cilium/tls/hubble-metrics/server.key + {{- if .Values.hubble.metrics.tls.server.mtls.enabled }} + hubble-metrics-server-tls-client-ca-files: /var/lib/cilium/tls/hubble-metrics/client-ca.crt + {{- end }} + {{- end }} # A space separated list of metrics to enable. See [0] for available metrics. # # https://github.com/cilium/hubble/blob/master/Documentation/metrics.md @@ -944,6 +974,11 @@ data: hubble-disable-tls: "true" {{- end }} {{- end }} +{{- if .Values.hubble.dropEventEmitter.enabled }} + hubble-drop-events: "true" + hubble-drop-events-interval: {{ .Values.hubble.dropEventEmitter.interval | quote }} + hubble-drop-events-reasons: {{ .Values.hubble.dropEventEmitter.reasons | join " " | quote }} +{{- end }} {{- if .Values.hubble.preferIpv6 }} hubble-prefer-ipv6: "true" {{- end }} @@ -1007,6 +1042,10 @@ data: limit-ipam-api-qps: {{ .Values.ipam.operator.externalAPILimitQPS | quote }} {{- end }} +{{- if .Values.nodeIPAM.enabled }} + enable-node-ipam: "true" +{{- end }} + {{- if .Values.apiRateLimit }} api-rate-limit: {{ .Values.apiRateLimit | quote }} {{- end }} @@ -1014,9 +1053,6 @@ data: {{- if .Values.egressGateway.enabled }} enable-ipv4-egress-gateway: "true" {{- end }} -{{- if .Values.egressGateway.installRoutes }} - install-egress-gateway-routes: "true" -{{- end }} {{- if hasKey .Values.egressGateway "reconciliationTriggerInterval" }} egress-gateway-reconciliation-trigger-interval: {{ .Values.egressGateway.reconciliationTriggerInterval | quote }} {{- end }} @@ -1092,8 +1128,6 @@ data: {{- if .Values.bgpControlPlane.enabled }} enable-bgp-control-plane: "true" bgp-secrets-namespace: {{ 
.Values.bgpControlPlane.secretsNamespace.name | quote }} -{{- else }} - enable-bgp-control-plane: "false" {{- end }} {{- if .Values.pmtuDiscovery.enabled }} @@ -1117,8 +1151,15 @@ data: vlan-bpf-bypass: {{ .Values.bpf.vlanBypass | join " " | quote }} {{- end }} -{{- if .Values.enableCiliumEndpointSlice }} +{{- if .Values.bpf.disableExternalIPMitigation }} + disable-external-ip-mitigation: {{ .Values.bpf.disableExternalIPMitigation | quote }} +{{- end }} + +{{- if or .Values.ciliumEndpointSlice.enabled .Values.enableCiliumEndpointSlice }} enable-cilium-endpoint-slice: "true" + {{- if .Values.ciliumEndpointSlice.rateLimits }} + ces-rate-limits: {{ .Values.ciliumEndpointSlice.rateLimits | toJson | quote }} + {{- end }} {{- end }} {{- if hasKey .Values "enableK8sTerminatingEndpoint" }} @@ -1171,6 +1212,9 @@ data: # default DNS proxy to transparent mode in non-chaining modes dnsproxy-enable-transparent-mode: {{ $defaultDNSProxyEnableTransparentMode | quote }} {{- end }} + {{- if (not (kindIs "invalid" .Values.dnsProxy.socketLingerTimeout)) }} + dnsproxy-socket-linger-timeout: {{ .Values.dnsProxy.socketLingerTimeout | quote }} + {{- end }} {{- if .Values.dnsProxy.dnsRejectResponseCode }} tofqdns-dns-reject-response-code: {{ .Values.dnsProxy.dnsRejectResponseCode | quote }} {{- end }} @@ -1231,15 +1275,20 @@ data: proxy-max-connection-duration-seconds: {{ .Values.envoy.maxConnectionDurationSeconds | quote }} proxy-idle-timeout-seconds: {{ .Values.envoy.idleTimeoutDurationSeconds | quote }} - external-envoy-proxy: {{ .Values.envoy.enabled | quote }} + external-envoy-proxy: {{ include "envoyDaemonSetEnabled" . 
| quote }} + envoy-base-id: {{ .Values.envoy.baseID | quote }} {{- if .Values.envoy.log.path }} envoy-log: {{ .Values.envoy.log.path | quote }} {{- end }} + envoy-keep-cap-netbindservice: {{ .Values.envoy.securityContext.capabilities.keepCapNetBindService | quote }} + {{- if hasKey .Values.clustermesh "maxConnectedClusters" }} max-connected-clusters: {{ .Values.clustermesh.maxConnectedClusters | quote }} {{- end }} + clustermesh-enable-endpoint-sync: {{ .Values.clustermesh.enableEndpointSliceSynchronization | quote }} + clustermesh-enable-mcs-api: {{ .Values.clustermesh.enableMCSAPISupport | quote }} # Extra config allows adding arbitrary properties to the cilium config. # By putting it at the end of the ConfigMap, it's also possible to override existing properties. diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/configmap.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/configmap.yaml index 990cf951a..33c5a8330 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/configmap.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/configmap.yaml @@ -1,4 +1,5 @@ -{{- if and .Values.envoy.enabled (not .Values.preflight.enabled) }} +{{- $envoyDS := eq (include "envoyDaemonSetEnabled" .) "true" -}} +{{- if and $envoyDS (not .Values.preflight.enabled) }} --- apiVersion: v1 diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/daemonset.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/daemonset.yaml index 30b9af0f8..2dfb7ab80 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/daemonset.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/daemonset.yaml @@ -1,5 +1,5 @@ -{{- if and .Values.envoy.enabled (not .Values.preflight.enabled) }} - +{{- $envoyDS := eq (include "envoyDaemonSetEnabled" .) 
"true" -}} +{{- if (and $envoyDS (not .Values.preflight.enabled)) }} --- apiVersion: apps/v1 kind: DaemonSet @@ -26,8 +26,8 @@ spec: template: metadata: annotations: - {{- if and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled (not .Values.envoy.prometheus.serviceMonitor.enabled) }} - prometheus.io/port: "{{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }}" + {{- if and .Values.envoy.prometheus.enabled (not .Values.envoy.prometheus.serviceMonitor.enabled) }} + prometheus.io/port: "{{ .Values.envoy.prometheus.port }}" prometheus.io/scrape: "true" {{- end }} {{- if .Values.envoy.rollOutPods }} @@ -74,8 +74,12 @@ spec: command: - /usr/bin/cilium-envoy-starter args: + {{- if .Values.envoy.securityContext.capabilities.keepCapNetBindService }} + - '--keep-cap-net-bind-service' + {{- end }} + - '--' - '-c /var/run/cilium/envoy/bootstrap-config.json' - - '--base-id 0' + - '--base-id {{ int .Values.envoy.baseID }}' {{- if and (.Values.debug.enabled) (hasKey .Values.debug "verbose") (.Values.debug.verbose) (has "envoy" ( splitList " " .Values.debug.verbose )) }} - '--log-level trace' {{- else if and (.Values.debug.enabled) (hasKey .Values.debug "verbose") (.Values.debug.verbose) (has "flow" ( splitList " " .Values.debug.verbose )) }} @@ -90,7 +94,6 @@ spec: {{- with .Values.envoy.extraArgs }} {{- toYaml . 
| trim | nindent 8 }} {{- end }} - {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} startupProbe: httpGet: host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} @@ -101,21 +104,12 @@ spec: periodSeconds: {{ .Values.envoy.startupProbe.periodSeconds }} successThreshold: 1 initialDelaySeconds: 5 - {{- end }} livenessProbe: httpGet: host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} path: /healthz port: {{ .Values.envoy.healthPort }} scheme: HTTP - {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} - # The initial delay for the liveness probe is intentionally large to - # avoid an endless kill & restart cycle if in the event that the initial - # bootstrapping takes longer than expected. - # Starting from Kubernetes 1.20, we are using startupProbe instead - # of this field. - initialDelaySeconds: 120 - {{- end }} periodSeconds: {{ .Values.envoy.livenessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.envoy.livenessProbe.failureThreshold }} @@ -126,9 +120,6 @@ spec: path: /healthz port: {{ .Values.envoy.healthPort }} scheme: HTTP - {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} - initialDelaySeconds: 5 - {{- end }} periodSeconds: {{ .Values.envoy.readinessProbe.periodSeconds }} successThreshold: 1 failureThreshold: {{ .Values.envoy.readinessProbe.failureThreshold }} @@ -146,11 +137,9 @@ spec: fieldPath: metadata.namespace {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST - value: {{ .Values.k8sServiceHost | quote }} - {{- end }} - {{- if .Values.k8sServicePort }} + value: {{ include "k8sServiceHost" . }} - name: KUBERNETES_SERVICE_PORT - value: {{ .Values.k8sServicePort | quote }} + value: {{ include "k8sServicePort" . }} {{- end }} {{- with .Values.envoy.extraEnv }} {{- toYaml . | trim | nindent 8 }} @@ -159,12 +148,18 @@ spec: resources: {{- toYaml . 
| trim | nindent 10 }} {{- end }} - {{- if and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled }} + {{- if .Values.envoy.prometheus.enabled }} ports: - name: envoy-metrics - containerPort: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} - hostPort: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} + containerPort: {{ .Values.envoy.prometheus.port }} + hostPort: {{ .Values.envoy.prometheus.port }} protocol: TCP + {{- if and .Values.envoy.debug.admin.enabled .Values.envoy.debug.admin.port }} + - name: envoy-admin + containerPort: {{ .Values.envoy.debug.admin.port }} + hostPort: {{ .Values.envoy.debug.admin.port }} + protocol: TCP + {{- end }} {{- end }} securityContext: {{- if .Values.envoy.securityContext.privileged }} @@ -214,7 +209,6 @@ spec: {{- end }} restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.envoy.priorityClassName "system-node-critical") }} - serviceAccount: {{ .Values.serviceAccounts.envoy.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.envoy.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.envoy.automount }} terminationGracePeriodSeconds: {{ .Values.envoy.terminationGracePeriodSeconds }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/service.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/service.yaml deleted file mode 100644 index d238c62e6..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/service.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and .Values.envoy.enabled (not .Values.preflight.enabled) .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: cilium-envoy - namespace: {{ .Release.Namespace }} - {{- if or (not .Values.envoy.prometheus.serviceMonitor.enabled) .Values.envoy.annotations }} - annotations: - {{- if not 
.Values.envoy.prometheus.serviceMonitor.enabled }} - prometheus.io/scrape: "true" - prometheus.io/port: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port | quote }} - {{- end }} - {{- with .Values.envoy.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- end }} - labels: - k8s-app: cilium-envoy - app.kubernetes.io/name: cilium-envoy - app.kubernetes.io/part-of: cilium - io.cilium/app: proxy -spec: - clusterIP: None - type: ClusterIP - selector: - k8s-app: cilium-envoy - ports: - - name: envoy-metrics - port: {{ .Values.proxy.prometheus.port | default .Values.envoy.prometheus.port }} - protocol: TCP - targetPort: envoy-metrics -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/serviceaccount.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/serviceaccount.yaml index 607dabcac..710506e19 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/serviceaccount.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/serviceaccount.yaml @@ -1,4 +1,5 @@ -{{- if and .Values.envoy.enabled (not .Values.preflight.enabled) .Values.serviceAccounts.envoy.create }} +{{- $envoyDS := eq (include "envoyDaemonSetEnabled" .) 
"true" -}} +{{- if and $envoyDS (not .Values.preflight.enabled) .Values.serviceAccounts.envoy.create }} apiVersion: v1 kind: ServiceAccount metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/servicemonitor.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/servicemonitor.yaml index 10f84d82b..ea7415c4e 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/servicemonitor.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-envoy/servicemonitor.yaml @@ -1,4 +1,5 @@ -{{- if and .Values.envoy.enabled (not .Values.preflight.enabled) .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled .Values.envoy.prometheus.serviceMonitor.enabled }} +{{- $envoyDS := eq (include "envoyDaemonSetEnabled" .) "true" -}} +{{- if and $envoyDS (not .Values.preflight.enabled) .Values.envoy.prometheus.enabled .Values.envoy.prometheus.serviceMonitor.enabled }} --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-gateway-api-class.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-gateway-api-class.yaml index 74a78d8da..30ab1719a 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-gateway-api-class.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-gateway-api-class.yaml @@ -1,10 +1,11 @@ {{- if .Values.gatewayAPI.enabled -}} -{{- if .Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1/GatewayClass" }} +{{- if ( or (eq (.Values.gatewayAPI.gatewayClass.create | toString) "true") (and (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1/GatewayClass") (eq (.Values.gatewayAPI.gatewayClass.create | toString) "auto"))) }} apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: cilium spec: controllerName: io.cilium/gateway-controller + description: The default Cilium GatewayClass {{- end}} {{- end}} diff --git 
a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-ingress-service.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-ingress-service.yaml index ff6269d22..eac13d766 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-ingress-service.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-ingress-service.yaml @@ -23,18 +23,23 @@ spec: port: 443 protocol: TCP nodePort: {{ .Values.ingressController.service.secureNodePort }} + {{- if .Values.ingressController.hostNetwork.enabled }} + type: ClusterIP + {{- else }} type: {{ .Values.ingressController.service.type }} - {{- if semverCompare ">=1.24-0" .Capabilities.KubeVersion.Version -}} + {{- end }} {{- if .Values.ingressController.service.loadBalancerClass }} loadBalancerClass: {{ .Values.ingressController.service.loadBalancerClass }} {{- end }} {{- if (not (kindIs "invalid" .Values.ingressController.service.allocateLoadBalancerNodePorts)) }} allocateLoadBalancerNodePorts: {{ .Values.ingressController.service.allocateLoadBalancerNodePorts }} {{- end }} - {{- end -}} {{- if .Values.ingressController.service.loadBalancerIP }} loadBalancerIP: {{ .Values.ingressController.service.loadBalancerIP }} {{- end }} + {{- if .Values.ingressController.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.ingressController.service.externalTrafficPolicy }} + {{- end }} --- apiVersion: v1 kind: Endpoints diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-nodeinit/daemonset.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-nodeinit/daemonset.yaml index 3ed09268a..c92eabfa6 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-nodeinit/daemonset.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-nodeinit/daemonset.yaml @@ -114,7 +114,6 @@ spec: hostNetwork: true priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.nodeinit.priorityClassName "system-node-critical") }} {{- if 
.Values.serviceAccounts.nodeinit.enabled }} - serviceAccount: {{ .Values.serviceAccounts.nodeinit.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.nodeinit.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.nodeinit.automount }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrole.yaml index 7e9080b5c..1bc5de406 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create }} +{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -56,6 +56,12 @@ rules: resources: - endpointslices verbs: +{{- if .Values.clustermesh.enableEndpointSliceSynchronization }} + - create + - update + - delete + - deletecollection +{{- end }} - get - list - watch @@ -95,6 +101,15 @@ rules: - delete - patch {{- end }} +{{- if .Values.clustermesh.enableEndpointSliceSynchronization }} +- apiGroups: + - "" + resources: + - events + verbs: + - patch + - create +{{- end }} - apiGroups: - cilium.io resources: @@ -215,6 +230,7 @@ rules: resources: - ciliumloadbalancerippools - ciliumpodippools + - ciliumbgppeeringpolicies - ciliumbgpclusterconfigs - ciliumbgpnodeconfigoverrides verbs: @@ -291,4 +307,39 @@ rules: - update - patch {{- end }} +{{- if or .Values.gatewayAPI.enabled .Values.clustermesh.enableMCSAPISupport }} +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceimports + verbs: + - get + - list + - watch +{{- if .Values.clustermesh.enableMCSAPISupport }} + - create + - update + - patch + - delete +{{- end }} +{{- end }} +{{- if .Values.clustermesh.enableMCSAPISupport }} +- apiGroups: + 
- multicluster.x-k8s.io + resources: + - serviceexports + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - update + - patch + - delete +{{- end }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrolebinding.yaml index f0d00e2e1..1f32800b0 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create }} +{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/deployment.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/deployment.yaml index 4f4450e51..2b0b536b3 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/deployment.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/deployment.yaml @@ -92,6 +92,10 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.namespace + {{- if .Values.clustermesh.enableEndpointSliceSynchronization }} + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + {{- end }} - name: CILIUM_DEBUG valueFrom: configMapKeyRef: @@ -134,11 +138,9 @@ spec: {{- end }} {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST - value: {{ .Values.k8sServiceHost | quote }} - {{- end }} - {{- if .Values.k8sServicePort }} + value: {{ include "k8sServiceHost" . }} - name: KUBERNETES_SERVICE_PORT - value: {{ .Values.k8sServicePort | quote }} + value: {{ include "k8sServicePort" . 
}} {{- end }} {{- if .Values.azure.enabled }} {{- if .Values.azure.subscriptionID }} @@ -171,12 +173,16 @@ spec: ports: - name: prometheus containerPort: {{ .Values.operator.prometheus.port }} + {{- if .Values.operator.hostNetwork }} hostPort: {{ .Values.operator.prometheus.port }} + {{- end }} protocol: TCP {{- end }} livenessProbe: httpGet: + {{- if .Values.operator.hostNetwork }} host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} + {{- end }} path: /healthz port: 9234 scheme: HTTP @@ -185,7 +191,9 @@ spec: timeoutSeconds: 3 readinessProbe: httpGet: + {{- if .Values.operator.hostNetwork }} host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }} + {{- end }} path: /healthz port: 9234 scheme: HTTP @@ -201,12 +209,17 @@ spec: - name: etcd-config-path mountPath: /var/lib/etcd-config readOnly: true - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} - name: etcd-secrets mountPath: /var/lib/etcd-secrets readOnly: true {{- end }} {{- end }} + {{- if .Values.clustermesh.enableEndpointSliceSynchronization }} + - name: clustermesh-secrets + mountPath: /var/lib/cilium/clustermesh + readOnly: true + {{- end }} {{- if .Values.kubeConfigPath }} - name: kube-config mountPath: {{ .Values.kubeConfigPath }} @@ -242,17 +255,12 @@ spec: {{- toYaml . 
| trim | nindent 10 }} {{- end }} terminationMessagePolicy: FallbackToLogsOnError - hostNetwork: true - {{- if and .Values.etcd.managed (not .Values.etcd.k8sService) }} - # In managed etcd mode, Cilium must be able to resolve the DNS name of - # the etcd service - dnsPolicy: ClusterFirstWithHostNet - {{- else if .Values.operator.dnsPolicy }} + hostNetwork: {{ .Values.operator.hostNetwork }} + {{- if .Values.operator.dnsPolicy }} dnsPolicy: {{ .Values.operator.dnsPolicy }} {{- end }} restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.operator.priorityClassName "system-cluster-critical") }} - serviceAccount: {{ .Values.serviceAccounts.operator.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.operator.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.operator.automount }} {{- with .Values.operator.affinity }} @@ -277,6 +285,15 @@ spec: nodeSelector: {{- toYaml . | trim | nindent 8 }} {{- end }} + {{- if and .Values.clustermesh.enableEndpointSliceSynchronization .Values.clustermesh.config.enabled (not (and .Values.clustermesh.useAPIServer .Values.clustermesh.apiserver.kvstoremesh.enabled )) }} + hostAliases: + {{- range $cluster := .Values.clustermesh.config.clusters }} + {{- range $ip := $cluster.ips }} + - ip: {{ $ip }} + hostnames: [ "{{ $cluster.name }}.{{ $.Values.clustermesh.config.domain }}" ] + {{- end }} + {{- end }} + {{- end }} {{- with .Values.operator.tolerations }} tolerations: {{- toYaml . | trim | nindent 8 }} @@ -296,7 +313,7 @@ spec: items: - key: etcd-config path: etcd.config - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} # To read the k8s etcd secrets in case the user might want to use TLS - name: etcd-secrets secret: @@ -334,4 +351,38 @@ spec: {{- with .Values.operator.extraVolumes }} {{- toYaml . 
| nindent 6 }} {{- end }} + {{- if .Values.clustermesh.enableEndpointSliceSynchronization }} + # To read the clustermesh configuration + - name: clustermesh-secrets + projected: + # note: the leading zero means this number is in octal representation: do not remove it + defaultMode: 0400 + sources: + - secret: + name: cilium-clustermesh + optional: true + # note: items are not explicitly listed here, since the entries of this secret + # depend on the peers configured, and that would cause a restart of all agents + # at every addition/removal. Leaving the field empty makes each secret entry + # to be automatically projected into the volume as a file whose name is the key. + - secret: + name: clustermesh-apiserver-remote-cert + optional: true + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + {{- if not .Values.tls.caBundle.enabled }} + - key: ca.crt + path: common-etcd-client-ca.crt + {{- else }} + - {{ .Values.tls.caBundle.useSecret | ternary "secret" "configMap" }}: + name: {{ .Values.tls.caBundle.name }} + optional: true + items: + - key: {{ .Values.tls.caBundle.key }} + path: common-etcd-client-ca.crt + {{- end }} + {{- end }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml index a224b9e6c..05b251046 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-operator/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and .Values.operator.enabled .Values.operator.podDisruptionBudget.enabled }} {{- $component := .Values.operator.podDisruptionBudget }} -apiVersion: {{ include "podDisruptionBudget.apiVersion" . 
}} +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: cilium-operator diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrole.yaml index d2a8f4ca8..bb9b686ec 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if .Values.preflight.enabled }} +{{- if and .Values.preflight.enabled .Values.rbac.create }} {{- /* Keep file in sync with cilium-agent/clusterrole.yaml */ -}} @@ -41,6 +41,15 @@ rules: - get - list - watch +{{- if and .Values.hubble.enabled .Values.hubble.dropEventEmitter.enabled }} +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +{{- end }} {{- if .Values.annotateK8sNode }} - apiGroups: - "" @@ -139,8 +148,6 @@ rules: - apiGroups: - cilium.io resources: - - ciliumnetworkpolicies/status - - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints - ciliuml2announcementpolicies/status diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrolebinding.yaml index 1237d6ac7..5b60e5192 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.preflight.enabled .Values.serviceAccounts.preflight.create }} +{{- if and .Values.preflight.enabled .Values.serviceAccounts.preflight.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/daemonset.yaml 
b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/daemonset.yaml index b0f646dc3..51cfe1ae0 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/daemonset.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/daemonset.yaml @@ -86,7 +86,7 @@ spec: - name: etcd-config-path mountPath: /var/lib/etcd-config readOnly: true - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} - name: etcd-secrets mountPath: /var/lib/etcd-secrets readOnly: true @@ -132,11 +132,9 @@ spec: env: {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST - value: {{ .Values.k8sServiceHost | quote }} - {{- end }} - {{- if .Values.k8sServicePort }} + value: {{ include "k8sServiceHost" . }} - name: KUBERNETES_SERVICE_PORT - value: {{ .Values.k8sServicePort | quote }} + value: {{ include "k8sServicePort" . }} {{- end }} volumeMounts: - name: cilium-run @@ -145,7 +143,7 @@ spec: - name: etcd-config-path mountPath: /var/lib/etcd-config readOnly: true - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} - name: etcd-secrets mountPath: /var/lib/etcd-secrets readOnly: true @@ -168,15 +166,9 @@ spec: terminationMessagePolicy: FallbackToLogsOnError {{- end }} hostNetwork: true - # This is here to seamlessly allow migrate-identity to work with - # etcd-operator setups. The assumption is that other cases would also - # work since the cluster DNS would forward the request on. 
- # This differs from the cilium-agent daemonset, where this is only - # enabled when etcd.managed=true dnsPolicy: ClusterFirstWithHostNet restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.preflight.priorityClassName "system-node-critical") }} - serviceAccount: {{ .Values.serviceAccounts.preflight.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.preflight.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.preflight.automount }} terminationGracePeriodSeconds: {{ .Values.preflight.terminationGracePeriodSeconds }} @@ -209,7 +201,7 @@ spec: - key: etcd-config path: etcd.config # To read the k8s etcd secrets in case the user might want to use TLS - {{- if or .Values.etcd.ssl .Values.etcd.managed }} + {{- if .Values.etcd.ssl }} - name: etcd-secrets secret: secretName: cilium-etcd-secrets diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/deployment.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/deployment.yaml index af0a31baa..32c169b42 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/deployment.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/deployment.yaml @@ -46,20 +46,13 @@ spec: cilium-dbg preflight validate-cnp; touch /tmp/ready-validate-cnp; sleep 1h; - livenessProbe: - exec: - command: - - cat - - /tmp/ready-validate-cnp - initialDelaySeconds: 5 - periodSeconds: 5 readinessProbe: exec: command: - cat - /tmp/ready-validate-cnp - initialDelaySeconds: 5 - periodSeconds: 5 + initialDelaySeconds: {{ .Values.preflight.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.preflight.readinessProbe.periodSeconds }} {{- with .Values.preflight.extraVolumeMounts }} volumeMounts: {{- toYaml . 
| nindent 10 }} @@ -67,11 +60,9 @@ spec: env: {{- if .Values.k8sServiceHost }} - name: KUBERNETES_SERVICE_HOST - value: {{ .Values.k8sServiceHost | quote }} - {{- end }} - {{- if .Values.k8sServicePort }} + value: {{ include "k8sServiceHost" . }} - name: KUBERNETES_SERVICE_PORT - value: {{ .Values.k8sServicePort | quote }} + value: {{ include "k8sServicePort" . }} {{- end }} {{- with .Values.preflight.extraEnv }} {{- toYaml . | trim | nindent 10 }} @@ -88,7 +79,6 @@ spec: hostNetwork: true restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.preflight.priorityClassName "system-cluster-critical") }} - serviceAccount: {{ .Values.serviceAccounts.preflight.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.preflight.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.preflight.automount }} terminationGracePeriodSeconds: {{ .Values.preflight.terminationGracePeriodSeconds }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml index 4b3c7cb0d..c00d9b896 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/cilium-preflight/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and .Values.preflight.enabled .Values.preflight.validateCNPs .Values.preflight.podDisruptionBudget.enabled }} {{- $component := .Values.preflight.podDisruptionBudget }} -apiVersion: {{ include "podDisruptionBudget.apiVersion" . 
}} +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: cilium-pre-flight-check diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/_helpers.tpl b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/_helpers.tpl index 19cce1f78..a90bccaa5 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/_helpers.tpl +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/_helpers.tpl @@ -2,6 +2,10 @@ admin-{{ .Values.cluster.name }} {{- end -}} +{{- define "clustermesh-apiserver-generate-certs.local-common-name" -}} +local-{{ .Values.cluster.name }} +{{- end -}} + {{- define "clustermesh-apiserver-generate-certs.remote-common-name" -}} {{- if eq .Values.clustermesh.apiserver.tls.authMode "cluster" -}} remote-{{ .Values.cluster.name }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrole.yaml index c5df0163b..55d2505df 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.serviceAccounts.clustermeshApiserver.create }} +{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.serviceAccounts.clustermeshApiserver.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -10,6 +10,7 @@ metadata: {{- toYaml . 
| nindent 4 }} {{- end }} rules: +{{- if .Values.externalWorkloads.enabled }} - apiGroups: - cilium.io resources: @@ -33,11 +34,14 @@ rules: - ciliumendpoints/status verbs: - patch +{{- end }} - apiGroups: - cilium.io resources: - ciliumidentities +{{- if .Values.externalWorkloads.enabled }} - ciliumexternalworkloads +{{- end }} - ciliumendpoints - ciliumnodes verbs: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrolebinding.yaml index edc3cf9ca..49b19097f 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.serviceAccounts.clustermeshApiserver.create }} +{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.serviceAccounts.clustermeshApiserver.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/deployment.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/deployment.yaml index 6c5e6c3ec..6caee2838 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/deployment.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/deployment.yaml @@ -176,11 +176,13 @@ spec: {{- end }} - --cluster-name=$(CLUSTER_NAME) - --cluster-id=$(CLUSTER_ID) - - --kvstore-opt - - etcd.config=/var/lib/cilium/etcd-config.yaml + - --kvstore-opt=etcd.config=/var/lib/cilium/etcd-config.yaml + - --kvstore-opt=etcd.qps=20 + - --kvstore-opt=etcd.bootstrapQps=10000 {{- if hasKey .Values.clustermesh "maxConnectedClusters" }} - 
--max-connected-clusters={{ .Values.clustermesh.maxConnectedClusters }} {{- end }} + - --health-port={{ .Values.clustermesh.apiserver.healthPort }} {{- if ne .Values.clustermesh.apiserver.tls.authMode "legacy" }} - --cluster-users-enabled - --cluster-users-config-path=/var/lib/cilium/etcd-config/users.yaml @@ -205,22 +207,27 @@ spec: name: cilium-config key: cluster-id optional: true - - name: IDENTITY_ALLOCATION_MODE - valueFrom: - configMapKeyRef: - name: cilium-config - key: identity-allocation-mode - name: ENABLE_K8S_ENDPOINT_SLICE valueFrom: configMapKeyRef: name: cilium-config key: enable-k8s-endpoint-slice optional: true + readinessProbe: + httpGet: + path: /readyz + port: apiserv-health + {{- with .Values.clustermesh.apiserver.readinessProbe }} + {{- toYaml . | trim | nindent 10 }} + {{- end }} {{- with .Values.clustermesh.apiserver.extraEnv }} {{- toYaml . | trim | nindent 8 }} {{- end }} - {{- if .Values.clustermesh.apiserver.metrics.enabled }} ports: + - name: apiserv-health + containerPort: {{ .Values.clustermesh.apiserver.healthPort }} + protocol: TCP + {{- if .Values.clustermesh.apiserver.metrics.enabled }} - name: apiserv-metrics containerPort: {{ .Values.clustermesh.apiserver.metrics.port }} protocol: TCP @@ -265,11 +272,13 @@ spec: - --cluster-id=$(CLUSTER_ID) - --kvstore-opt=etcd.config=/var/lib/cilium/etcd-config.yaml - --kvstore-opt=etcd.qps=100 + - --kvstore-opt=etcd.bootstrapQps=10000 - --kvstore-opt=etcd.maxInflight=10 - --clustermesh-config=/var/lib/cilium/clustermesh {{- if hasKey .Values.clustermesh "maxConnectedClusters" }} - --max-connected-clusters={{ .Values.clustermesh.maxConnectedClusters }} {{- end }} + - --health-port={{ .Values.clustermesh.apiserver.kvstoremesh.healthPort }} {{- if .Values.clustermesh.apiserver.metrics.kvstoremesh.enabled }} - --prometheus-serve-addr=:{{ .Values.clustermesh.apiserver.metrics.kvstoremesh.port }} - --controller-group-metrics=all @@ -277,6 +286,13 @@ spec: {{- with 
.Values.clustermesh.apiserver.kvstoremesh.extraArgs }} {{- toYaml . | trim | nindent 8 }} {{- end }} + readinessProbe: + httpGet: + path: /readyz + port: kvmesh-health + {{- with .Values.clustermesh.apiserver.kvstoremesh.readinessProbe }} + {{- toYaml . | trim | nindent 10 }} + {{- end }} env: - name: CLUSTER_NAME valueFrom: @@ -291,8 +307,11 @@ spec: {{- with .Values.clustermesh.apiserver.kvstoremesh.extraEnv }} {{- toYaml . | trim | nindent 8 }} {{- end }} - {{- if .Values.clustermesh.apiserver.metrics.kvstoremesh.enabled }} ports: + - name: kvmesh-health + containerPort: {{ .Values.clustermesh.apiserver.kvstoremesh.healthPort }} + protocol: TCP + {{- if .Values.clustermesh.apiserver.metrics.kvstoremesh.enabled }} - name: kvmesh-metrics containerPort: {{ .Values.clustermesh.apiserver.metrics.kvstoremesh.port }} protocol: TCP @@ -374,7 +393,8 @@ spec: defaultMode: 0400 {{- end }} - name: etcd-data-dir - emptyDir: {} + emptyDir: + medium: {{ ternary "Memory" "" (eq .Values.clustermesh.apiserver.etcd.storageMedium "Memory") | quote }} {{- if .Values.clustermesh.apiserver.kvstoremesh.enabled }} - name: kvstoremesh-secrets projected: @@ -404,7 +424,6 @@ spec: {{- end }} restartPolicy: Always priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.clustermesh.apiserver.priorityClassName "system-cluster-critical") }} - serviceAccount: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.clustermeshApiserver.name | quote }} terminationGracePeriodSeconds: {{ .Values.clustermesh.apiserver.terminationGracePeriodSeconds }} automountServiceAccountToken: {{ .Values.serviceAccounts.clustermeshApiserver.automount }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml index 4a1bbf7e0..a5d30b7b1 100644 --- 
a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.podDisruptionBudget.enabled }} {{- $component := .Values.clustermesh.apiserver.podDisruptionBudget }} -apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: clustermesh-apiserver diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/service.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/service.yaml index 0a7028c54..76c33ff13 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/service.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/service.yaml @@ -26,6 +26,9 @@ spec: {{- if and (eq "NodePort" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.nodePort }} nodePort: {{ .Values.clustermesh.apiserver.service.nodePort }} {{- end }} + {{- if and (eq "LoadBalancer" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.loadBalancerClass }} + loadBalancerClass: {{ .Values.clustermesh.apiserver.service.loadBalancerClass }} + {{- end }} {{- if and (eq "LoadBalancer" .Values.clustermesh.apiserver.service.type) .Values.clustermesh.apiserver.service.loadBalancerIP }} loadBalancerIP: {{ .Values.clustermesh.apiserver.service.loadBalancerIP }} {{- end }} @@ -35,4 +38,7 @@ spec: {{- if .Values.clustermesh.apiserver.service.internalTrafficPolicy }} internalTrafficPolicy: {{ .Values.clustermesh.apiserver.service.internalTrafficPolicy }} {{- end }} + {{- if or (eq .Values.clustermesh.apiserver.service.enableSessionAffinity "Always") (and (eq .Values.clustermesh.apiserver.service.enableSessionAffinity 
"HAOnly") (gt (int .Values.clustermesh.apiserver.replicas) 1)) }} + sessionAffinity: ClientIP + {{- end }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/admin-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/admin-secret.yaml index 097cb14e0..dbf313a56 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/admin-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/admin-secret.yaml @@ -14,7 +14,5 @@ spec: {{- toYaml .Values.clustermesh.apiserver.tls.auto.certManagerIssuerRef | nindent 4 }} secretName: clustermesh-apiserver-admin-cert commonName: {{ include "clustermesh-apiserver-generate-certs.admin-common-name" . }} - dnsNames: - - localhost duration: {{ printf "%dh0m0s" (mul .Values.clustermesh.apiserver.tls.auto.certValidityDuration 24) }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/local-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/local-secret.yaml new file mode 100644 index 000000000..8ec9fa53d --- /dev/null +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-certmanager/local-secret.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.clustermesh.useAPIServer .Values.clustermesh.apiserver.kvstoremesh.enabled .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "certmanager") }} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: clustermesh-apiserver-local-cert + namespace: {{ .Release.Namespace }} + {{- with .Values.clustermesh.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + issuerRef: + {{- toYaml .Values.clustermesh.apiserver.tls.auto.certManagerIssuerRef | nindent 4 }} + secretName: clustermesh-apiserver-local-cert + commonName: {{ include "clustermesh-apiserver-generate-certs.local-common-name" . }} + duration: {{ printf "%dh0m0s" (mul .Values.clustermesh.apiserver.tls.auto.certValidityDuration 24) }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/_job-spec.tpl b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/_job-spec.tpl index 52a990896..52d859b1b 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/_job-spec.tpl +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/_job-spec.tpl @@ -1,9 +1,5 @@ {{- define "clustermesh-apiserver-generate-certs.job.spec" }} -{{- $certValiditySecondsStr := printf "%ds" (mul .Values.clustermesh.apiserver.tls.auto.certValidityDuration 24 60 60) -}} -{{- $clustermeshServerSANs := concat (list "*.mesh.cilium.io" (printf "clustermesh-apiserver.%s.svc" .Release.Namespace)) - .Values.clustermesh.apiserver.tls.server.extraDnsNames - .Values.clustermesh.apiserver.tls.server.extraIpAddresses --}} +{{- $certValidityStr := printf "%dh" (mul .Values.clustermesh.apiserver.tls.auto.certValidityDuration 24) -}} spec: template: metadata: @@ -20,30 +16,76 @@ spec: command: - "/usr/bin/cilium-certgen" args: - - "--cilium-namespace={{ .Release.Namespace }}" {{- if .Values.debug.enabled }} - "--debug" {{- end }} - "--ca-generate" - "--ca-reuse-secret" - {{- if and .Values.tls.ca.cert .Values.tls.ca.key }} + - "--ca-secret-namespace={{ .Release.Namespace }}" - "--ca-secret-name=cilium-ca" - {{- end }} - - "--clustermesh-apiserver-server-cert-generate" - - "--clustermesh-apiserver-server-cert-validity-duration={{ $certValiditySecondsStr }}" - - "--clustermesh-apiserver-server-cert-sans={{ join "," 
$clustermeshServerSANs }}" - - "--clustermesh-apiserver-admin-cert-generate" - - "--clustermesh-apiserver-admin-cert-validity-duration={{ $certValiditySecondsStr }}" - - "--clustermesh-apiserver-admin-cert-common-name={{ include "clustermesh-apiserver-generate-certs.admin-common-name" . }}" - {{- if .Values.externalWorkloads.enabled }} - - "--clustermesh-apiserver-client-cert-generate" - - "--clustermesh-apiserver-client-cert-validity-duration={{ $certValiditySecondsStr }}" - {{- end }} - {{- if .Values.clustermesh.useAPIServer }} - - "--clustermesh-apiserver-remote-cert-generate" - - "--clustermesh-apiserver-remote-cert-validity-duration={{ $certValiditySecondsStr }}" - - "--clustermesh-apiserver-remote-cert-common-name={{ include "clustermesh-apiserver-generate-certs.remote-common-name" . }}" - {{- end }} + - "--ca-common-name=Cilium CA" + env: + - name: CILIUM_CERTGEN_CONFIG + value: | + certs: + - name: clustermesh-apiserver-server-cert + namespace: {{ .Release.Namespace }} + commonName: "clustermesh-apiserver.cilium.io" + hosts: + - "clustermesh-apiserver.cilium.io" + - "*.mesh.cilium.io" + - "clustermesh-apiserver.{{ .Release.Namespace }}.svc" + {{- range $dns := .Values.clustermesh.apiserver.tls.server.extraDnsNames }} + - {{ $dns | quote }} + {{- end }} + - "127.0.0.1" + - "::1" + {{- range $ip := .Values.clustermesh.apiserver.tls.server.extraIpAddresses }} + - {{ $ip | quote }} + {{- end }} + usage: + - signing + - key encipherment + - server auth + validity: {{ $certValidityStr }} + - name: clustermesh-apiserver-admin-cert + namespace: {{ .Release.Namespace }} + commonName: {{ include "clustermesh-apiserver-generate-certs.admin-common-name" . | quote }} + usage: + - signing + - key encipherment + - client auth + validity: {{ $certValidityStr }} + {{- if .Values.clustermesh.useAPIServer }} + - name: clustermesh-apiserver-remote-cert + namespace: {{ .Release.Namespace }} + commonName: {{ include "clustermesh-apiserver-generate-certs.remote-common-name" . 
| quote }} + usage: + - signing + - key encipherment + - client auth + validity: {{ $certValidityStr }} + {{- end }} + {{- if and .Values.clustermesh.useAPIServer .Values.clustermesh.apiserver.kvstoremesh.enabled }} + - name: clustermesh-apiserver-local-cert + namespace: {{ .Release.Namespace }} + commonName: {{ include "clustermesh-apiserver-generate-certs.local-common-name" . | quote }} + usage: + - signing + - key encipherment + - client auth + validity: {{ $certValidityStr }} + {{- end }} + {{- if .Values.externalWorkloads.enabled }} + - name: clustermesh-apiserver-client-cert + namespace: {{ .Release.Namespace }} + commonName: "externalworkload" + usage: + - signing + - key encipherment + - client auth + validity: {{ $certValidityStr }} + {{- end }} {{- with .Values.certgen.extraVolumeMounts }} volumeMounts: {{- toYaml . | nindent 10 }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml index 946602b40..8c0e4cd5c 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/cronjob.yaml @@ -1,5 +1,5 @@ {{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "cronJob") .Values.clustermesh.apiserver.tls.auto.schedule }} -apiVersion: {{ include "cronjob.apiVersion" . 
}} +apiVersion: batch/v1 kind: CronJob metadata: name: clustermesh-apiserver-generate-certs diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/role.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/role.yaml index 79960cba2..a3a9f0893 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/role.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-cronjob/role.yaml @@ -34,6 +34,7 @@ rules: - clustermesh-apiserver-server-cert - clustermesh-apiserver-admin-cert - clustermesh-apiserver-remote-cert + - clustermesh-apiserver-local-cert - clustermesh-apiserver-client-cert verbs: - update diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/admin-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/admin-secret.yaml index 63282c98e..59b5b51c7 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/admin-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/admin-secret.yaml @@ -1,8 +1,7 @@ {{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm") }} {{- $_ := include "cilium.ca.setup" . -}} {{- $cn := include "clustermesh-apiserver-generate-certs.admin-common-name" . 
-}} -{{- $dns := list "localhost" }} -{{- $cert := genSignedCert $cn nil $dns (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int) .commonCA -}} +{{- $cert := genSignedCert $cn nil nil (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int) .commonCA -}} --- apiVersion: v1 kind: Secret diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/local-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/local-secret.yaml new file mode 100644 index 000000000..716ab8164 --- /dev/null +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-helm/local-secret.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.clustermesh.useAPIServer .Values.clustermesh.apiserver.kvstoremesh.enabled .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "helm") }} +{{- $_ := include "cilium.ca.setup" . -}} +{{- $cn := include "clustermesh-apiserver-generate-certs.local-common-name" . -}} +{{- $cert := genSignedCert $cn nil nil (.Values.clustermesh.apiserver.tls.auto.certValidityDuration | int) .commonCA -}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: clustermesh-apiserver-local-cert + namespace: {{ .Release.Namespace }} + {{- with .Values.clustermesh.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ .commonCA.Cert | b64enc }} + tls.crt: {{ $cert.Cert | b64enc }} + tls.key: {{ $cert.Key | b64enc }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/admin-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/admin-secret.yaml index de038ca87..68c454519 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/admin-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/admin-secret.yaml @@ -1,4 +1,5 @@ {{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) (not .Values.clustermesh.apiserver.tls.auto.enabled) }} +{{- if .Values.clustermesh.apiserver.tls.enableSecrets }} apiVersion: v1 kind: Secret metadata: @@ -14,3 +15,4 @@ data: tls.crt: {{ .Values.clustermesh.apiserver.tls.admin.cert | required "missing clustermesh.apiserver.tls.admin.cert" }} tls.key: {{ .Values.clustermesh.apiserver.tls.admin.key | required "missing clustermesh.apiserver.tls.admin.key" }} {{- end }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/client-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/client-secret.yaml index b8ea7b604..f48d6604a 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/client-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/client-secret.yaml @@ -1,4 +1,5 @@ {{- if and .Values.externalWorkloads.enabled (not .Values.clustermesh.apiserver.tls.auto.enabled) }} +{{- if .Values.clustermesh.apiserver.tls.enableSecrets }} apiVersion: v1 kind: Secret metadata: @@ -14,3 +15,4 @@ data: tls.crt: {{ .Values.clustermesh.apiserver.tls.client.cert | required "missing 
clustermesh.apiserver.tls.client.cert" }} tls.key: {{ .Values.clustermesh.apiserver.tls.client.key | required "missing clustermesh.apiserver.tls.client.key" }} {{- end }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/remote-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/remote-secret.yaml index 8a253eb56..3e6f21f11 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/remote-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/remote-secret.yaml @@ -1,4 +1,5 @@ {{- if and .Values.clustermesh.useAPIServer (not .Values.clustermesh.apiserver.tls.auto.enabled) }} +{{- if .Values.clustermesh.apiserver.tls.enableSecrets }} apiVersion: v1 kind: Secret metadata: @@ -14,3 +15,4 @@ data: tls.crt: {{ .Values.clustermesh.apiserver.tls.remote.cert | required "missing clustermesh.apiserver.tls.remote.cert" }} tls.key: {{ .Values.clustermesh.apiserver.tls.remote.key | required "missing clustermesh.apiserver.tls.remote.key" }} {{- end }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/server-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/server-secret.yaml index 8c239b070..5af1b9bd5 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/server-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/tls-provided/server-secret.yaml @@ -1,4 +1,5 @@ {{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) (not .Values.clustermesh.apiserver.tls.auto.enabled) }} +{{- if .Values.clustermesh.apiserver.tls.enableSecrets }} apiVersion: v1 kind: Secret metadata: @@ -14,3 +15,4 @@ data: tls.crt: {{ .Values.clustermesh.apiserver.tls.server.cert | required "missing 
clustermesh.apiserver.tls.server.cert" }} tls.key: {{ .Values.clustermesh.apiserver.tls.server.key | required "missing clustermesh.apiserver.tls.server.key" }} {{- end }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/users-configmap.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/users-configmap.yaml index e1cbf95f7..ab42ad068 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/users-configmap.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/clustermesh-apiserver/users-configmap.yaml @@ -1,4 +1,7 @@ -{{- if ne .Values.clustermesh.apiserver.tls.authMode "legacy" }} +{{- if and + (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) + (ne .Values.clustermesh.apiserver.tls.authMode "legacy") +}} --- apiVersion: v1 kind: ConfigMap diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrole.yaml deleted file mode 100644 index 22a81c2c2..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrole.yaml +++ /dev/null @@ -1,79 +0,0 @@ -{{- if .Values.etcd.managed }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium-etcd-operator - {{- with .Values.etcd.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} - labels: - app.kubernetes.io/part-of: cilium -rules: -- apiGroups: - - etcd.database.coreos.com - resources: - - etcdclusters - verbs: - - get - - delete - - create - - update -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - delete - - get - - create -- apiGroups: - - "" - resources: - - deployments - verbs: - - delete - - create - - get - - update -- apiGroups: - - "" - resources: - - pods - verbs: - - list - - get - - delete -- apiGroups: - - apps - resources: - - deployments - verbs: - - delete - - create - - get - - update -- apiGroups: - - "" - resources: - - componentstatuses - verbs: - - get -- apiGroups: - - extensions - resources: - - deployments - verbs: - - delete - - create - - get - - update -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - create - - delete -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrolebinding.yaml deleted file mode 100644 index 25a92e100..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-clusterrolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.etcd.managed .Values.serviceAccounts.etcd.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium-etcd-operator - {{- with .Values.etcd.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} - labels: - app.kubernetes.io/part-of: cilium -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium-etcd-operator -subjects: -- kind: ServiceAccount - name: {{ .Values.serviceAccounts.etcd.name | quote }} - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml deleted file mode 100644 index 5946219f4..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-deployment.yaml +++ /dev/null @@ -1,128 +0,0 @@ -{{- if .Values.etcd.managed }} -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - io.cilium/app: etcd-operator - name: cilium-etcd-operator - app.kubernetes.io/name: cilium-etcd-operator - app.kubernetes.io/part-of: cilium - name: cilium-etcd-operator - namespace: {{ .Release.Namespace }} - {{- with .Values.etcd.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - replicas: 1 - selector: - matchLabels: - io.cilium/app: etcd-operator - name: cilium-etcd-operator -{{- with .Values.etcd.updateStrategy }} - strategy: - {{- toYaml . | trim | nindent 4 }} -{{- end }} - template: - metadata: -{{- with .Values.etcd.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} -{{- end }} - labels: - io.cilium/app: etcd-operator - app.kubernetes.io/part-of: cilium - app.kubernetes.io/name: cilium-etcd-operator - name: cilium-etcd-operator -{{- with .Values.etcd.podLabels }} - {{- toYaml . | nindent 8 }} -{{- end }} - spec: -{{- if .Values.etcd.affinity }} - affinity: -{{ toYaml .Values.etcd.affinity | indent 8 }} -{{- end }} -{{- with .Values.etcd.topologySpreadConstraints }} - topologySpreadConstraints: - {{- range $constraint := . 
}} - - {{ toYaml $constraint | nindent 8 | trim }} - {{- if not $constraint.labelSelector }} - labelSelector: - matchLabels: - io.cilium/app: etcd-operator - name: cilium-etcd-operator - {{- end }} - {{- end }} -{{- end }} -{{- if .Values.imagePullSecrets }} - imagePullSecrets: - {{ toYaml .Values.imagePullSecrets | indent 8 }} -{{- end }} -{{- with .Values.etcd.podSecurityContext }} - securityContext: - {{- toYaml . | nindent 8 }} -{{- end }} - containers: - - args: -{{- with .Values.etcd.extraArgs }} - {{- toYaml . | trim | nindent 8 }} -{{- end }} - #- --etcd-node-selector=disktype=ssd,cputype=high - command: - - /usr/bin/cilium-etcd-operator - env: - - name: CILIUM_ETCD_OPERATOR_CLUSTER_DOMAIN - value: "{{ .Values.etcd.clusterDomain }}" - - name: CILIUM_ETCD_OPERATOR_ETCD_CLUSTER_SIZE - value: "{{ .Values.etcd.clusterSize }}" - - name: CILIUM_ETCD_OPERATOR_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CILIUM_ETCD_OPERATOR_POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: CILIUM_ETCD_OPERATOR_POD_UID - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.uid - - name: CILIUM_ETCD_META_ETCD_AUTO_COMPACTION_MODE - value: "revision" - - name: CILIUM_ETCD_META_ETCD_AUTO_COMPACTION_RETENTION - value: "25000" - image: {{ include "cilium.image" .Values.etcd.image | quote }} - imagePullPolicy: {{ .Values.etcd.image.pullPolicy }} - name: cilium-etcd-operator - terminationMessagePolicy: FallbackToLogsOnError - {{- with .Values.etcd.securityContext }} - securityContext: - {{- toYaml . | trim | nindent 8 }} - {{- end }} - {{- with .Values.etcd.extraVolumeMounts }} - volumeMounts: - {{- toYaml . 
| nindent 8 }} - {{- end }} - dnsPolicy: ClusterFirst - hostNetwork: true - priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.clustermesh.apiserver.priorityClassName "system-cluster-critical") }} - restartPolicy: Always - serviceAccount: {{ .Values.serviceAccounts.etcd.name | quote }} - serviceAccountName: {{ .Values.serviceAccounts.etcd.name | quote }} - automountServiceAccountToken: {{ .Values.serviceAccounts.etcd.automount }} -{{- with .Values.etcd.nodeSelector }} - nodeSelector: - {{- toYaml . | trim | nindent 8 }} -{{- end }} -{{- with .Values.etcd.tolerations }} - tolerations: - {{- toYaml . | trim | nindent 6 }} -{{- end }} - {{- with .Values.etcd.extraVolumes }} - volumes: - {{- toYaml . | nindent 6 }} - {{- end }} -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-serviceaccount.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-serviceaccount.yaml deleted file mode 100644 index 7d7b765a9..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/cilium-etcd-operator-serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.etcd.managed .Values.serviceAccounts.etcd.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccounts.etcd.name | quote }} - namespace: {{ .Release.Namespace }} - {{- if or .Values.serviceAccounts.etcd.annotations .Values.etcd.annotations }} - annotations: - {{- with .Values.etcd.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.serviceAccounts.etcd.annotations }} - {{- toYaml . 
| nindent 4 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrole.yaml deleted file mode 100644 index 72cb4e6e8..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrole.yaml +++ /dev/null @@ -1,60 +0,0 @@ -{{- if .Values.etcd.managed }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: etcd-operator - {{- with .Values.etcd.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - app.kubernetes.io/part-of: cilium -rules: -- apiGroups: - - etcd.database.coreos.com - resources: - - etcdclusters - - etcdbackups - - etcdrestores - verbs: - - '*' -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - '*' -- apiGroups: - - "" - resources: - - pods - - services - - endpoints - - persistentvolumeclaims - - events - - deployments - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - verbs: - - '*' -- apiGroups: - - extensions - resources: - - deployments - verbs: - - create - - get - - list - - patch - - update -- apiGroups: - - "" - resources: - - secrets - verbs: - - get -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrolebinding.yaml deleted file mode 100644 index b729ea715..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-clusterrolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.etcd.managed }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: etcd-operator - {{- with .Values.etcd.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} - labels: - app.kubernetes.io/part-of: cilium -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: etcd-operator -subjects: -- kind: ServiceAccount - name: cilium-etcd-sa - namespace: {{ .Release.Namespace }} -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-serviceaccount.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-serviceaccount.yaml deleted file mode 100644 index 06ee39c1b..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/etcd-operator-serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.etcd.managed }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium-etcd-sa - namespace: {{ .Release.Namespace }} - {{- if or .Values.serviceAccounts.etcd.annotations .Values.etcd.annotations }} - annotations: - {{- with .Values.etcd.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.serviceAccounts.etcd.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml deleted file mode 100644 index 5939b4ae9..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/etcd-operator/poddisruptionbudget.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if and .Values.etcd.managed .Values.etcd.podDisruptionBudget.enabled }} -{{- $component := .Values.etcd.podDisruptionBudget }} -apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} -kind: PodDisruptionBudget -metadata: - name: cilium-etcd-operator - namespace: {{ .Release.Namespace }} - {{- with .Values.etcd.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} - labels: - io.cilium/app: etcd-operator - name: cilium-etcd-operator - app.kubernetes.io/name: cilium-etcd-operator - app.kubernetes.io/part-of: cilium -spec: - {{- with $component.maxUnavailable }} - maxUnavailable: {{ . }} - {{- end }} - {{- with $component.minAvailable }} - minAvailable: {{ . }} - {{- end }} - selector: - matchLabels: - io.cilium/app: etcd-operator - name: cilium-etcd-operator -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/configmap.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/configmap.yaml index 93f5b8d88..42e48a71b 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/configmap.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/configmap.yaml @@ -17,7 +17,7 @@ data: config.yaml: | cluster-name: {{ .Values.cluster.name }} peer-service: "hubble-peer.{{ .Release.Namespace }}.svc.{{ .Values.hubble.peerService.clusterDomain }}:{{ $peerSvcPort }}" - listen-address: {{ .Values.hubble.relay.listenHost }}:{{ .Values.hubble.relay.listenPort }} + listen-address: {{ include "hubble-relay.config.listenAddress" . }} gops: {{ .Values.hubble.relay.gops.enabled }} gops-port: {{ .Values.hubble.relay.gops.port | quote }} {{- if .Values.hubble.relay.pprof.enabled }} @@ -39,13 +39,5 @@ data: {{- else }} disable-client-tls: true {{- end }} - {{- if and .Values.hubble.tls.enabled .Values.hubble.relay.tls.server.enabled }} - tls-relay-server-cert-file: /var/lib/hubble-relay/tls/server.crt - tls-relay-server-key-file: /var/lib/hubble-relay/tls/server.key - {{- if .Values.hubble.relay.tls.server.mtls }} - tls-relay-client-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - {{- end }} - {{- else }} - disable-server-tls: true - {{- end }} + {{- include "hubble-relay.config.tls" . 
| nindent 4 }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/deployment.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/deployment.yaml index 52b9eba5c..074e2574c 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/deployment.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/deployment.yaml @@ -31,6 +31,10 @@ spec: {{- with .Values.hubble.relay.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} + {{- if and .Values.hubble.relay.prometheus.enabled (not .Values.hubble.relay.prometheus.serviceMonitor.enabled) }} + prometheus.io/port: {{ .Values.hubble.relay.prometheus.port | quote }} + prometheus.io/scrape: "true" + {{- end }} labels: k8s-app: hubble-relay app.kubernetes.io/name: hubble-relay @@ -64,33 +68,44 @@ spec: {{- end }} ports: - name: grpc - containerPort: {{ .Values.hubble.relay.listenPort }} + containerPort: {{ include "hubble-relay.config.listenPort" . }} {{- if .Values.hubble.relay.prometheus.enabled }} - name: prometheus containerPort: {{ .Values.hubble.relay.prometheus.port }} protocol: TCP {{- end }} readinessProbe: - {{- include "hubble-relay.probe" . | nindent 12 }} - {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} - # Starting from Kubernetes 1.20, we are using startupProbe instead - # of this field. - initialDelaySeconds: 5 - {{- end }} + grpc: + port: 4222 + timeoutSeconds: 3 + # livenessProbe will kill the pod, we should be very conservative + # here on failures since killing the pod should be a last resort, and + # we should provide enough time for relay to retry before killing it. livenessProbe: - {{- include "hubble-relay.probe" . | nindent 12 }} - {{- if semverCompare "<1.20-0" .Capabilities.KubeVersion.Version }} - # Starting from Kubernetes 1.20, we are using startupProbe instead - # of this field. 
- initialDelaySeconds: 60 - {{- end }} - {{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }} + grpc: + port: 4222 + timeoutSeconds: 10 + # Give relay time to establish connections and make a few retries + # before starting livenessProbes. + initialDelaySeconds: 10 + # 10 second * 12 failures = 2 minutes of failure. + # If relay cannot become healthy after 2 minutes, then killing it + # might resolve whatever issue is occurring. + # + # 10 seconds is a reasonable retry period so we can see if it's + # failing regularly or only sporadically. + periodSeconds: 10 + failureThreshold: 12 startupProbe: - # give the relay one minute to start up - {{- include "hubble-relay.probe" . | nindent 12 }} + grpc: + port: 4222 + # Give relay time to get it's certs and establish connections and + # make a few retries before starting startupProbes. + initialDelaySeconds: 10 + # 20 * 3 seconds = 1 minute of failure before we consider startup as failed. failureThreshold: 20 + # Retry more frequently at startup so that it can be considered started more quickly. periodSeconds: 3 - {{- end }} {{- with .Values.hubble.relay.extraEnv }} env: {{- toYaml . | trim | nindent 12 }} @@ -112,9 +127,9 @@ spec: {{- toYaml . | nindent 10 }} {{- end }} terminationMessagePolicy: FallbackToLogsOnError + {{- include "hubble-relay.containers.extra" . | nindent 8 }} restartPolicy: Always priorityClassName: {{ .Values.hubble.relay.priorityClassName }} - serviceAccount: {{ .Values.serviceAccounts.relay.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.relay.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.relay.automount }} terminationGracePeriodSeconds: {{ .Values.hubble.relay.terminationGracePeriodSeconds }} @@ -182,20 +197,7 @@ spec: {{- end }} {{- end }} {{- with .Values.hubble.relay.extraVolumes }} - {{- toYaml . | nindent 6 }} + {{- toYaml . 
| nindent 6}} {{- end }} -{{- end }} - -{{- define "hubble-relay.probe" }} -{{- /* This distinction can be removed once we drop support for k8s 1.23 */}} -{{- if semverCompare ">=1.24-0" .Capabilities.KubeVersion.Version -}} -grpc: - port: 4222 -{{- else }} -exec: - command: - - grpc_health_probe - - -addr=localhost:4222 -{{- end }} -timeoutSeconds: 3 + {{- include "hubble-relay.volumes.extra" . | nindent 6 }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml index 4fd6da9ba..6162cb81d 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and .Values.hubble.enabled .Values.hubble.relay.enabled .Values.hubble.relay.podDisruptionBudget.enabled }} {{- $component := .Values.hubble.relay.podDisruptionBudget }} -apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: hubble-relay diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/service.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/service.yaml index 39259473a..27a955195 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/service.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-relay/service.yaml @@ -4,10 +4,14 @@ apiVersion: v1 metadata: name: hubble-relay namespace: {{ .Release.Namespace }} - {{- with .Values.hubble.relay.annotations }} annotations: + {{- with .Values.hubble.relay.annotations }} {{- toYaml . 
| nindent 4 }} - {{- end }} + {{- end }} + {{- if and .Values.hubble.relay.prometheus.enabled (not .Values.hubble.relay.prometheus.serviceMonitor.enabled) }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.hubble.relay.prometheus.port | quote }} + {{- end }} labels: k8s-app: hubble-relay app.kubernetes.io/name: hubble-relay @@ -23,7 +27,7 @@ spec: {{- else }} port: {{ .Values.hubble.relay.tls.server.enabled | ternary 443 80 }} {{- end }} - targetPort: {{ .Values.hubble.relay.listenPort }} + targetPort: {{ include "hubble-relay.service.targetPort" . }} {{- if and (eq "NodePort" .Values.hubble.relay.service.type) .Values.hubble.relay.service.nodePort }} nodePort: {{ .Values.hubble.relay.service.nodePort }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrole.yaml index 5df709f76..7efa4824a 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) .Values.hubble.ui.enabled .Values.serviceAccounts.ui.create }} +{{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) .Values.hubble.ui.enabled .Values.serviceAccounts.ui.create .Values.rbac.create }} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrolebinding.yaml index d091786b2..e25aeb17f 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) 
.Values.hubble.ui.enabled .Values.serviceAccounts.ui.create }} +{{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) .Values.hubble.ui.enabled .Values.serviceAccounts.ui.create .Values.rbac.create }} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/deployment.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/deployment.yaml index cb6bd5df7..105907a5f 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/deployment.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/deployment.yaml @@ -44,7 +44,6 @@ spec: {{- omit . "enabled" | toYaml | nindent 8 }} {{- end }} priorityClassName: {{ .Values.hubble.ui.priorityClassName }} - serviceAccount: {{ .Values.serviceAccounts.ui.name | quote }} serviceAccountName: {{ .Values.serviceAccounts.ui.name | quote }} automountServiceAccountToken: {{ .Values.serviceAccounts.ui.automount }} {{- with .Values.imagePullSecrets }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/ingress.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/ingress.yaml index 2c0ff7d3e..348e281d7 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/ingress.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/ingress.yaml @@ -1,6 +1,6 @@ {{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) .Values.hubble.ui.enabled .Values.hubble.ui.ingress.enabled }} {{- $baseUrl := .Values.hubble.ui.baseUrl -}} -apiVersion: {{ template "ingress.apiVersion" . 
}} +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: hubble-ui @@ -35,6 +35,11 @@ spec: http: paths: - path: {{ $baseUrl | quote }} - {{- include "ingress.paths" $ | nindent 12 }} + pathType: Prefix + backend: + service: + name: hubble-ui + port: + name: http {{- end }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml index af3b6705d..c23e3ad04 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble-ui/poddisruptionbudget.yaml @@ -1,6 +1,6 @@ {{- if and (or .Values.hubble.enabled .Values.hubble.ui.standalone.enabled) .Values.hubble.ui.enabled .Values.hubble.ui.podDisruptionBudget.enabled }} {{- $component := .Values.hubble.ui.podDisruptionBudget }} -apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: hubble-ui diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/peer-service.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/peer-service.yaml index 7ba56456b..aec3f889a 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/peer-service.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/peer-service.yaml @@ -24,7 +24,5 @@ spec: {{- end }} protocol: TCP targetPort: {{ .Values.hubble.peerService.targetPort }} -{{- if semverCompare ">=1.22-0" .Capabilities.KubeVersion.GitVersion }} internalTrafficPolicy: Local {{- end }} -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/servicemonitor.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/servicemonitor.yaml index 3b3ba8ba2..d1c3c3e58 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/servicemonitor.yaml +++ 
b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/servicemonitor.yaml @@ -30,6 +30,14 @@ spec: interval: {{ .Values.hubble.metrics.serviceMonitor.interval | quote }} honorLabels: true path: /metrics + {{- if .Values.hubble.metrics.tls.enabled }} + {{- $cn := list (.Values.cluster.name | replace "." "-") "hubble-metrics.cilium.io" | join "." }} + {{- $tlsConfig := merge .Values.hubble.metrics.serviceMonitor.tlsConfig (dict "serverName" $cn) }} + tlsConfig: {{ $tlsConfig | toYaml | nindent 6 }} + scheme: https + {{- else }} + scheme: http + {{- end }} {{- with .Values.hubble.metrics.serviceMonitor.relabelings }} relabelings: {{- toYaml . | nindent 4 }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-certmanager/metrics-server-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-certmanager/metrics-server-secret.yaml new file mode 100644 index 000000000..d7f151ae5 --- /dev/null +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-certmanager/metrics-server-secret.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.hubble.enabled .Values.hubble.metrics.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "certmanager") }} +{{- $cn := list (.Values.cluster.name | replace "." "-") "hubble-metrics.cilium.io" | join "." }} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: hubble-metrics-server-certs + namespace: {{ .Release.Namespace }} + {{- with .Values.hubble.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + issuerRef: + {{- toYaml .Values.hubble.tls.auto.certManagerIssuerRef | nindent 4 }} + secretName: hubble-metrics-server-certs + commonName: {{ $cn | quote }} + dnsNames: + - {{ $cn | quote }} + {{- range $dns := .Values.hubble.metrics.tls.server.extraDnsNames }} + - {{ $dns | quote }} + {{- end }} + {{- if .Values.hubble.metrics.tls.server.extraIpAddresses }} + ipAddresses: + {{- range $ip := .Values.hubble.metrics.tls.server.extraIpAddresses }} + - {{ $ip | quote }} + {{- end }} + {{- end }} + duration: {{ printf "%dh0m0s" (mul .Values.hubble.tls.auto.certValidityDuration 24) }} + privateKey: + rotationPolicy: Always +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/_job-spec.tpl b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/_job-spec.tpl index 4bee347a9..001a3e3cf 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/_job-spec.tpl +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/_job-spec.tpl @@ -1,5 +1,5 @@ {{- define "hubble-generate-certs.job.spec" }} -{{- $certValiditySecondsStr := printf "%ds" (mul .Values.hubble.tls.auto.certValidityDuration 24 60 60) -}} +{{- $certValidityStr := printf "%dh" (mul .Values.hubble.tls.auto.certValidityDuration 24) -}} spec: template: metadata: @@ -9,41 +9,121 @@ spec: {{- toYaml . 
| nindent 8 }} {{- end }} spec: + securityContext: + {{- if semverCompare "<1.30.0" (printf "%d.%d.0" (semver .Capabilities.KubeVersion.Version).Major (semver .Capabilities.KubeVersion.Version).Minor) }} + appArmorProfile: + type: RuntimeDefault + {{- end }} + seccompProfile: + type: RuntimeDefault containers: - name: certgen image: {{ include "cilium.image" .Values.certgen.image | quote }} imagePullPolicy: {{ .Values.certgen.image.pullPolicy }} + securityContext: + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false command: - "/usr/bin/cilium-certgen" # Because this is executed as a job, we pass the values as command # line args instead of via config map. This allows users to inspect # the values used in past runs by inspecting the completed pod. args: - - "--cilium-namespace={{ .Release.Namespace }}" {{- if .Values.debug.enabled }} - "--debug" {{- end }} - "--ca-generate" - "--ca-reuse-secret" - {{- if and .Values.tls.ca.cert .Values.tls.ca.key }} + - "--ca-secret-namespace={{ .Release.Namespace }}" - "--ca-secret-name=cilium-ca" - {{- end }} - - "--hubble-server-cert-generate" - - "--hubble-server-cert-common-name={{ list "*" (.Values.cluster.name | replace "." "-") "hubble-grpc.cilium.io" | join "." }}" - - "--hubble-server-cert-validity-duration={{ $certValiditySecondsStr }}" - {{- if .Values.hubble.relay.enabled }} - - "--hubble-relay-client-cert-generate" - - "--hubble-relay-client-cert-validity-duration={{ $certValiditySecondsStr }}" - {{- end }} - {{- if and .Values.hubble.relay.enabled .Values.hubble.relay.tls.server.enabled }} - - "--hubble-relay-server-cert-generate" - - "--hubble-relay-server-cert-validity-duration={{ $certValiditySecondsStr }}" - {{- end }} + - "--ca-common-name=Cilium CA" + env: + - name: CILIUM_CERTGEN_CONFIG + value: | + certs: + - name: hubble-server-certs + namespace: {{ .Release.Namespace }} + commonName: {{ list "*" (.Values.cluster.name | replace "." "-") "hubble-grpc.cilium.io" | join "." 
| quote }} + hosts: + - {{ list "*" (.Values.cluster.name | replace "." "-") "hubble-grpc.cilium.io" | join "." | quote }} + {{- range $dns := .Values.hubble.tls.server.extraDnsNames }} + - {{ $dns | quote }} + {{- end }} + {{- range $ip := .Values.hubble.tls.server.extraIpAddresses }} + - {{ $ip | quote }} + {{- end }} + usage: + - signing + - key encipherment + - server auth + validity: {{ $certValidityStr }} + {{- if .Values.hubble.relay.enabled }} + - name: hubble-relay-client-certs + namespace: {{ .Release.Namespace }} + commonName: "*.hubble-relay.cilium.io" + hosts: + - "*.hubble-relay.cilium.io" + usage: + - signing + - key encipherment + - client auth + validity: {{ $certValidityStr }} + {{- end }} + {{- if and .Values.hubble.relay.enabled .Values.hubble.relay.tls.server.enabled }} + - name: hubble-relay-server-certs + namespace: {{ .Release.Namespace }} + commonName: "*.hubble-relay.cilium.io" + hosts: + - "*.hubble-relay.cilium.io" + {{- range $dns := .Values.hubble.relay.tls.server.extraDnsNames }} + - {{ $dns | quote }} + {{- end }} + {{- range $ip := .Values.hubble.relay.tls.server.extraIpAddresses }} + - {{ $ip | quote }} + {{- end }} + usage: + - signing + - key encipherment + - server auth + validity: {{ $certValidityStr }} + {{- end }} + {{- if and .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }} + - name: hubble-metrics-server-certs + namespace: {{ .Release.Namespace }} + commonName: {{ list (.Values.cluster.name | replace "." "-") "hubble-metrics.cilium.io" | join "." }} | quote }} + hosts: + - {{ list (.Values.cluster.name | replace "." "-") "hubble-metrics.cilium.io" | join "." 
}} | quote }} + {{- range $dns := .Values.hubble.metrics.tls.server.extraDnsNames }} + - {{ $dns | quote }} + {{- end }} + {{- range $ip := .Values.hubble.metrics.tls.server.extraIpAddresses }} + - {{ $ip | quote }} + {{- end }} + usage: + - signing + - key encipherment + - server auth + validity: {{ $certValidityStr }} + {{- end }} + {{- if and .Values.hubble.ui.enabled .Values.hubble.relay.enabled .Values.hubble.relay.tls.server.enabled }} + - name: hubble-ui-client-certs + namespace: {{ .Release.Namespace }} + commonName: "*.hubble-ui.cilium.io" + hosts: + - "*.hubble-ui.cilium.io" + usage: + - signing + - key encipherment + - client auth + validity: {{ $certValidityStr }} + {{- end }} {{- with .Values.certgen.extraVolumeMounts }} volumeMounts: {{- toYaml . | nindent 10 }} {{- end }} - hostNetwork: true + hostNetwork: false {{- with .Values.certgen.tolerations }} tolerations: {{- toYaml . | nindent 8 }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml index fa9966080..7d9f7174c 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/cronjob.yaml @@ -1,5 +1,5 @@ {{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.hubble.tls.auto.schedule }} -apiVersion: {{ include "cronjob.apiVersion" . 
}} +apiVersion: batch/v1 kind: CronJob metadata: name: hubble-generate-certs diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/role.yaml similarity index 80% rename from argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/clusterrole.yaml rename to argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/role.yaml index 74d078317..07a38b084 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/role.yaml @@ -1,8 +1,9 @@ -{{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.serviceAccounts.hubblecertgen.create }} +{{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.serviceAccounts.hubblecertgen.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole +kind: Role metadata: name: hubble-generate-certs + namespace: {{ .Release.Namespace }} {{- with .Values.hubble.annotations }} annotations: {{- toYaml . 
| nindent 4 }} @@ -24,6 +25,8 @@ rules: - hubble-server-certs - hubble-relay-client-certs - hubble-relay-server-certs + - hubble-metrics-server-certs + - hubble-ui-client-certs verbs: - update - apiGroups: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/rolebinding.yaml similarity index 79% rename from argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/clusterrolebinding.yaml rename to argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/rolebinding.yaml index 5938f16cc..fa56e469d 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-cronjob/rolebinding.yaml @@ -1,8 +1,9 @@ -{{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.serviceAccounts.hubblecertgen.create }} +{{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "cronJob") .Values.serviceAccounts.hubblecertgen.create .Values.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: name: hubble-generate-certs + namespace: {{ .Release.Namespace }} {{- with .Values.hubble.annotations }} annotations: {{- toYaml . 
| nindent 4 }} @@ -11,7 +12,7 @@ metadata: app.kubernetes.io/part-of: cilium roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole + kind: Role name: hubble-generate-certs subjects: - kind: ServiceAccount diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/_helpers.tpl b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/_helpers.tpl deleted file mode 100644 index 79babf94f..000000000 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/_helpers.tpl +++ /dev/null @@ -1,31 +0,0 @@ -{{/* -Generate TLS certificates for Hubble Server and Hubble Relay. - -Note: Always use this template as follows: - - {{- $_ := include "hubble-generate-certs.helm.setup-ca" . -}} - -The assignment to `$_` is required because we store the generated CI in a global `ca` variable. -Please, don't try to "simplify" this, as without this trick, every generated -certificate would be signed by a different CA. -*/}} -{{- define "hubble-generate-certs.helm.setup-ca" }} - {{- if not .ca }} - {{- $ca := "" -}} - {{- $crt := .Values.tls.ca.cert -}} - {{- $key := .Values.tls.ca.key -}} - {{- if and $crt $key }} - {{- $ca = buildCustomCert $crt $key -}} - {{- else }} - {{- $_ := include "cilium.ca.setup" . -}} - {{- with lookup "v1" "Secret" .Release.Namespace .commonCASecretName }} - {{- $crt := index .data "ca.crt" }} - {{- $key := index .data "ca.key" }} - {{- $ca = buildCustomCert $crt $key -}} - {{- else }} - {{- $ca = .commonCA -}} - {{- end }} - {{- end }} - {{- $_ := set . 
"ca" $ca -}} - {{- end }} -{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/metrics-server-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/metrics-server-secret.yaml new file mode 100644 index 000000000..e4e4a8b85 --- /dev/null +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/metrics-server-secret.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.agent .Values.hubble.enabled .Values.hubble.metrics.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") }} +{{- $_ := include "cilium.ca.setup" . -}} +{{- $cn := list (.Values.cluster.name | replace "." "-") "hubble-metrics.cilium.io" | join "." }} +{{- $ip := .Values.hubble.metrics.tls.server.extraIpAddresses }} +{{- $dns := prepend .Values.hubble.metrics.tls.server.extraDnsNames $cn }} +{{- $cert := genSignedCert $cn $ip $dns (.Values.hubble.tls.auto.certValidityDuration | int) .commonCA -}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: hubble-metrics-server-certs + namespace: {{ .Release.Namespace }} + {{- with .Values.hubble.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ .commonCA.Cert | b64enc }} + tls.crt: {{ $cert.Cert | b64enc }} + tls.key: {{ $cert.Key | b64enc }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-client-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-client-secret.yaml index e1d6e8763..f266d9a0a 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-client-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-client-secret.yaml @@ -1,8 +1,8 @@ {{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") .Values.hubble.relay.enabled }} -{{- $_ := include "hubble-generate-certs.helm.setup-ca" . -}} +{{- $_ := include "cilium.ca.setup" . -}} {{- $cn := "*.hubble-relay.cilium.io" }} {{- $dns := list $cn }} -{{- $cert := genSignedCert $cn nil $dns (.Values.hubble.tls.auto.certValidityDuration | int) .ca -}} +{{- $cert := genSignedCert $cn nil $dns (.Values.hubble.tls.auto.certValidityDuration | int) .commonCA -}} --- apiVersion: v1 kind: Secret @@ -15,7 +15,7 @@ metadata: {{- end }} type: kubernetes.io/tls data: - ca.crt: {{ .ca.Cert | b64enc }} + ca.crt: {{ .commonCA.Cert | b64enc }} tls.crt: {{ $cert.Cert | b64enc }} tls.key: {{ $cert.Key | b64enc }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-server-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-server-secret.yaml index 902c2be4f..4a3e3ad13 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-server-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/relay-server-secret.yaml @@ -1,9 +1,9 @@ {{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method 
"helm") .Values.hubble.relay.enabled .Values.hubble.relay.tls.server.enabled }} -{{- $_ := include "hubble-generate-certs.helm.setup-ca" . -}} +{{- $_ := include "cilium.ca.setup" . -}} {{- $cn := "*.hubble-relay.cilium.io" }} {{- $ip := .Values.hubble.relay.tls.server.extraIpAddresses }} {{- $dns := prepend .Values.hubble.relay.tls.server.extraDnsNames $cn }} -{{- $cert := genSignedCert $cn $ip $dns (.Values.hubble.tls.auto.certValidityDuration | int) .ca -}} +{{- $cert := genSignedCert $cn $ip $dns (.Values.hubble.tls.auto.certValidityDuration | int) .commonCA -}} --- apiVersion: v1 kind: Secret @@ -16,7 +16,7 @@ metadata: {{- end }} type: kubernetes.io/tls data: - ca.crt: {{ .ca.Cert | b64enc }} + ca.crt: {{ .commonCA.Cert | b64enc }} tls.crt: {{ $cert.Cert | b64enc }} tls.key: {{ $cert.Key | b64enc }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/server-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/server-secret.yaml index a05c32667..0f9b3343b 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/server-secret.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/server-secret.yaml @@ -1,9 +1,9 @@ {{- if and .Values.agent .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") }} -{{- $_ := include "hubble-generate-certs.helm.setup-ca" . -}} +{{- $_ := include "cilium.ca.setup" . -}} {{- $cn := list "*" (.Values.cluster.name | replace "." "-") "hubble-grpc.cilium.io" | join "." 
}} {{- $ip := .Values.hubble.tls.server.extraIpAddresses }} {{- $dns := prepend .Values.hubble.tls.server.extraDnsNames $cn }} -{{- $cert := genSignedCert $cn $ip $dns (.Values.hubble.tls.auto.certValidityDuration | int) .ca -}} +{{- $cert := genSignedCert $cn $ip $dns (.Values.hubble.tls.auto.certValidityDuration | int) .commonCA -}} --- apiVersion: v1 kind: Secret @@ -16,7 +16,7 @@ metadata: {{- end }} type: kubernetes.io/tls data: - ca.crt: {{ .ca.Cert | b64enc }} + ca.crt: {{ .commonCA.Cert | b64enc }} tls.crt: {{ $cert.Cert | b64enc }} tls.key: {{ $cert.Key | b64enc }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/ui-client-certs.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/ui-client-certs.yaml index 7b385b26b..ea5e69267 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/ui-client-certs.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-helm/ui-client-certs.yaml @@ -1,8 +1,8 @@ {{- if and .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "helm") .Values.hubble.ui.enabled .Values.hubble.relay.enabled .Values.hubble.relay.tls.server.enabled }} -{{- $_ := include "hubble-generate-certs.helm.setup-ca" . -}} +{{- $_ := include "cilium.ca.setup" . 
-}} {{- $cn := "*.hubble-ui.cilium.io" }} {{- $dns := list $cn }} -{{- $cert := genSignedCert $cn nil $dns (.Values.hubble.tls.auto.certValidityDuration | int) .ca -}} +{{- $cert := genSignedCert $cn nil $dns (.Values.hubble.tls.auto.certValidityDuration | int) .commonCA -}} --- apiVersion: v1 kind: Secret @@ -15,7 +15,7 @@ metadata: {{- end }} type: kubernetes.io/tls data: - ca.crt: {{ .ca.Cert | b64enc }} + ca.crt: {{ .commonCA.Cert | b64enc }} tls.crt: {{ $cert.Cert | b64enc }} tls.key: {{ $cert.Key | b64enc }} {{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-provided/metrics-server-secret.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-provided/metrics-server-secret.yaml new file mode 100644 index 000000000..f30f45724 --- /dev/null +++ b/argocd-helm-charts/cilium/charts/cilium/templates/hubble/tls-provided/metrics-server-secret.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.agent .Values.hubble.enabled .Values.hubble.metrics.tls.enabled (not .Values.hubble.tls.auto.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: hubble-metrics-server-certs + namespace: {{ .Release.Namespace }} + {{- with .Values.hubble.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ .Values.tls.ca.cert }} + tls.crt: {{ .Values.hubble.metrics.tls.server.cert | required "missing hubble.metrics.tls.server.cert" }} + tls.key: {{ .Values.hubble.metrics.tls.server.key | required "missing hubble.metrics.tls.server.key" }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrole.yaml index 90e2165d1..293008404 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.agent.serviceAccount.create -}} +{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.agent.serviceAccount.create .Values.rbac.create -}} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrolebinding.yaml index 50fe95881..02265a94a 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/spire/agent/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.agent.serviceAccount.create -}} +{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.agent.serviceAccount.create .Values.rbac.create -}} --- kind: 
ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrole.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrole.yaml index 31bc2de11..f6d985457 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrole.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.server.serviceAccount.create -}} +{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.server.serviceAccount.create .Values.rbac.create -}} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrolebinding.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrolebinding.yaml index 4a04d80c6..750b07acd 100644 --- a/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrolebinding.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/spire/server/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.server.serviceAccount.create -}} +{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled .Values.authentication.mutual.spire.install.server.serviceAccount.create .Values.rbac.create -}} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/argocd-helm-charts/cilium/charts/cilium/templates/validate.yaml b/argocd-helm-charts/cilium/charts/cilium/templates/validate.yaml index 3c89e4e38..8bc687dbf 100644 --- 
a/argocd-helm-charts/cilium/charts/cilium/templates/validate.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/templates/validate.yaml @@ -16,7 +16,7 @@ {{- end }} {{/* validate service monitoring CRDs */}} -{{- if or (and .Values.prometheus.enabled .Values.prometheus.serviceMonitor.enabled) (and .Values.operator.prometheus.enabled .Values.operator.prometheus.serviceMonitor.enabled) (and .Values.proxy.prometheus.enabled .Values.envoy.prometheus.enabled .Values.envoy.prometheus.serviceMonitor.enabled) (and .Values.proxy.prometheus.enabled .Values.hubble.relay.prometheus.enabled .Values.hubble.relay.prometheus.serviceMonitor.enabled) }} +{{- if or (and .Values.prometheus.enabled .Values.prometheus.serviceMonitor.enabled) (and .Values.operator.prometheus.enabled .Values.operator.prometheus.serviceMonitor.enabled) (and .Values.envoy.prometheus.enabled .Values.envoy.prometheus.serviceMonitor.enabled) (and .Values.hubble.relay.prometheus.enabled .Values.hubble.relay.prometheus.serviceMonitor.enabled) }} {{- if not (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1") }} {{- if not .Values.prometheus.serviceMonitor.trustCRDsExist }} {{ fail "Service Monitor requires monitoring.coreos.com/v1 CRDs. Please refer to https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml or set .Values.prometheus.serviceMonitor.trustCRDsExist=true" }} @@ -60,6 +60,18 @@ {{- end }} {{- end }} +{{- if and .Values.ingressController.enabled (or (eq .Values.ingressController.service.type "LoadBalancer") (eq .Values.ingressController.service.type "NodePort"))}} + {{- if not (or (eq .Values.ingressController.service.externalTrafficPolicy "Cluster") (eq .Values.ingressController.service.externalTrafficPolicy "Local")) }} + {{ fail "Cilium Ingress services of type 'LoadBalancer' or 'NodePort' need an externalTrafficPolicy set to 'Cluster' or 'Local'." 
}} + {{- end }} +{{- end }} + +{{- if .Values.gatewayAPI.enabled }} + {{- if not (or (eq .Values.gatewayAPI.externalTrafficPolicy "Cluster") (eq .Values.gatewayAPI.externalTrafficPolicy "Local")) }} + {{ fail "Cilium GatewayAPI needs an externalTrafficPolicy set to 'Cluster' or 'Local'." }} + {{- end }} +{{- end }} + {{- if or .Values.envoyConfig.enabled .Values.ingressController.enabled .Values.gatewayAPI.enabled }} {{- if or (eq (toString .Values.kubeProxyReplacement) "disabled") (and (not (hasKey .Values "kubeProxyReplacement")) (not (semverCompare ">=1.14" (default "1.14" .Values.upgradeCompatibility)))) }} {{ fail "Ingress/Gateway API controller and EnvoyConfig require .Values.kubeProxyReplacement to be explicitly set to 'false' or 'true'" }} @@ -73,12 +85,28 @@ {{- end }} {{/* validate Cilium operator */}} -{{- if eq .Values.enableCiliumEndpointSlice true }} +{{- if or .Values.ciliumEndpointSlice.enabled .Values.enableCiliumEndpointSlice }} {{- if eq .Values.disableEndpointCRD true }} - {{ fail "if Cilium Endpoint Slice is enabled (.Values.enableCiliumEndpointSlice=true), it requires .Values.disableEndpointCRD=false" }} + {{ fail "if Cilium Endpoint Slice is enabled (.Values.ciliumEndpointSlice.enabled=true), it requires .Values.disableEndpointCRD=false" }} {{- end }} {{- end }} +{{/* validate cluster name */}} +{{- if eq .Values.cluster.name "" }} + {{ fail "The cluster name is invalid: cannot be empty" }} +{{- end }} +{{- if semverCompare ">=1.16" (default "1.16" .Values.upgradeCompatibility) }} +{{- if gt (len .Values.cluster.name) 32 }} + {{ fail "The cluster name is invalid: must not be more than 32 characters. 
Configure 'upgradeCompatibility' to 1.15 or earlier to temporarily skip this check at your own risk" }} +{{- end }} +{{- if not (regexMatch "^([a-z0-9][-a-z0-9]*)?[a-z0-9]$" .Values.cluster.name) }} + {{ fail "The cluster name is invalid: must consist of lower case alphanumeric characters and '-', and must start and end with an alphanumeric character. Configure 'upgradeCompatibility' to 1.15 or earlier to temporarily skip this check at your own risk" }} +{{- end }} +{{- end }} +{{- if and (eq .Values.cluster.name "default") (ne (int .Values.cluster.id) 0) }} + {{ fail "The cluster name is invalid: cannot use default value with cluster.id != 0" }} +{{- end }} + {{/* validate clustermesh-apiserver */}} {{- if .Values.clustermesh.useAPIServer }} {{- if ne .Values.identityAllocationMode "crd" }} @@ -101,3 +129,8 @@ {{- if and (ne (int .Values.clustermesh.maxConnectedClusters) 255) (ne (int .Values.clustermesh.maxConnectedClusters) 511) }} {{- fail "max-connected-clusters must be set to 255 or 511" }} {{- end }} + +{{/*validate Envoy baseID */}} +{{- if not (and (ge (int .Values.envoy.baseID) 0) (le (int .Values.envoy.baseID) 4294967295)) }} + {{- fail "envoy.baseID must be an int. 
Supported values 0 - 4294967295" }} +{{- end }} diff --git a/argocd-helm-charts/cilium/charts/cilium/values.schema.json b/argocd-helm-charts/cilium/charts/cilium/values.schema.json new file mode 100644 index 000000000..3866ea93e --- /dev/null +++ b/argocd-helm-charts/cilium/charts/cilium/values.schema.json @@ -0,0 +1,5250 @@ +{ + "properties": { + "MTU": { + "type": "integer" + }, + "affinity": { + "properties": { + "podAntiAffinity": { + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "anyOf": [ + { + "properties": { + "labelSelector": { + "properties": { + "matchLabels": { + "properties": { + "k8s-app": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "topologyKey": { + "type": "string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "agent": { + "type": "boolean" + }, + "agentNotReadyTaintKey": { + "type": "string" + }, + "aksbyocni": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "alibabacloud": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "annotateK8sNode": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "apiRateLimit": { + "type": [ + "null", + "string" + ] + }, + "authentication": { + "properties": { + "enabled": { + "type": "boolean" + }, + "gcInterval": { + "type": "string" + }, + "mutual": { + "properties": { + "connectTimeout": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "spire": { + "properties": { + "adminSocketPath": { + "type": "string" + }, + "agentSocketPath": { + "type": "string" + }, + "annotations": { + "type": "object" + }, + "connectionTimeout": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "install": { + "properties": { + "agent": { + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "image": { + "properties": { + 
"digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "labels": { + "type": "object" + }, + "nodeSelector": { + "type": "object" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "serviceAccount": { + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "skipKubeletVerification": { + "type": "boolean" + }, + "tolerations": { + "items": { + "anyOf": [ + { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + } + } + }, + { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + } + } + }, + { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + } + } + }, + { + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "existingNamespace": { + "type": "boolean" + }, + "initImage": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "namespace": { + "type": "string" + }, + "server": { + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "ca": { + "properties": { + "keyType": { + "type": "string" + }, + "subject": { + "properties": { + 
"commonName": { + "type": "string" + }, + "country": { + "type": "string" + }, + "organization": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "dataStorage": { + "properties": { + "accessMode": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "size": { + "type": "string" + }, + "storageClass": { + "type": [ + "null", + "string" + ] + } + }, + "type": "object" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "initContainers": { + "items": {}, + "type": "array" + }, + "labels": { + "type": "object" + }, + "nodeSelector": { + "type": "object" + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "service": { + "properties": { + "annotations": { + "type": "object" + }, + "labels": { + "type": "object" + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + "serviceAccount": { + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "items": {}, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "serverAddress": { + "type": [ + "null", + "string" + ] + }, + "trustDomain": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "queueSize": { + "type": "integer" + }, + "rotatedIdentitiesQueueSize": { + "type": "integer" + } + }, + "type": "object" + }, + "autoDirectNodeRoutes": { + "type": [ + "boolean", + "string" + ] + }, + "azure": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "bandwidthManager": { + "properties": { + "bbr": { + "type": "boolean" + }, + "enabled": { + "type": "boolean" + } + }, + 
"type": "object" + }, + "bgp": { + "properties": { + "announce": { + "properties": { + "loadbalancerIP": { + "type": "boolean" + }, + "podCIDR": { + "type": "boolean" + } + }, + "type": "object" + }, + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "bgpControlPlane": { + "properties": { + "enabled": { + "type": "boolean" + }, + "secretsNamespace": { + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "bpf": { + "properties": { + "authMapMax": { + "type": [ + "null", + "integer" + ] + }, + "autoMount": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "ctAnyMax": { + "type": [ + "null", + "integer" + ] + }, + "ctTcpMax": { + "type": [ + "null", + "integer" + ] + }, + "datapathMode": { + "type": "string" + }, + "disableExternalIPMitigation": { + "type": "boolean" + }, + "enableTCX": { + "type": "boolean" + }, + "events": { + "properties": { + "drop": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "policyVerdict": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "trace": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "hostLegacyRouting": { + "type": [ + "null", + "boolean" + ] + }, + "lbExternalClusterIP": { + "type": "boolean" + }, + "lbMapMax": { + "type": [ + "null", + "integer" + ] + }, + "mapDynamicSizeRatio": { + "type": [ + "null", + "number" + ] + }, + "masquerade": { + "type": [ + "null", + "boolean" + ] + }, + "monitorAggregation": { + "type": "string" + }, + "monitorFlags": { + "type": "string" + }, + "monitorInterval": { + "type": "string" + }, + "natMax": { + "type": [ + "null", + "integer" + ] + }, + "neighMax": { + "type": [ + "null", + "integer" + ] + }, + "nodeMapMax": { + "type": [ + "null", + "integer" + ] + }, + "policyMapMax": { + "type": 
[ + "null", + "integer" + ] + }, + "preallocateMaps": { + "type": "boolean" + }, + "root": { + "type": "string" + }, + "tproxy": { + "type": [ + "null", + "boolean" + ] + }, + "vlanBypass": { + "type": [ + "null", + "array" + ] + } + }, + "type": "object" + }, + "bpfClockProbe": { + "type": "boolean" + }, + "certgen": { + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "properties": { + "cronJob": { + "type": "object" + }, + "job": { + "type": "object" + } + }, + "type": "object" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "tolerations": { + "items": {}, + "type": "array" + }, + "ttlSecondsAfterFinished": { + "type": "integer" + } + }, + "type": "object" + }, + "cgroup": { + "properties": { + "autoMount": { + "properties": { + "enabled": { + "type": "boolean" + }, + "resources": { + "type": "object" + } + }, + "type": "object" + }, + "hostRoot": { + "type": "string" + } + }, + "type": "object" + }, + "ciliumEndpointSlice": { + "properties": { + "enabled": { + "type": "boolean" + }, + "rateLimits": { + "items": { + "anyOf": [ + { + "properties": { + "burst": { + "type": "integer" + }, + "limit": { + "type": "integer" + }, + "nodes": { + "type": "integer" + } + } + }, + { + "properties": { + "burst": { + "type": "integer" + }, + "limit": { + "type": "integer" + }, + "nodes": { + "type": "integer" + } + } + }, + { + "properties": { + "burst": { + "type": "integer" + }, + "limit": { + "type": "integer" + }, + "nodes": { + "type": "integer" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + 
}, + "cleanBpfState": { + "type": "boolean" + }, + "cleanState": { + "type": "boolean" + }, + "cluster": { + "properties": { + "id": { + "type": "integer" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "clustermesh": { + "properties": { + "annotations": { + "type": "object" + }, + "apiserver": { + "properties": { + "affinity": { + "properties": { + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "anyOf": [ + { + "properties": { + "podAffinityTerm": { + "properties": { + "labelSelector": { + "properties": { + "matchLabels": { + "properties": { + "k8s-app": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "topologyKey": { + "type": "string" + } + }, + "type": "object" + }, + "weight": { + "type": "integer" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "etcd": { + "properties": { + "init": { + "properties": { + "extraArgs": { + "items": {}, + "type": "array" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "resources": { + "type": "object" + } + }, + "type": "object" + }, + "lifecycle": { + "type": "object" + }, + "resources": { + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "drop": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "storageMedium": { + "enum": [ + "Disk", + "Memory" + ] + } + }, + "type": "object" + }, + "extraArgs": { + "items": {}, + "type": "array" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "healthPort": { + "type": "integer" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + 
"type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "kvstoremesh": { + "properties": { + "enabled": { + "type": "boolean" + }, + "extraArgs": { + "items": {}, + "type": "array" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "healthPort": { + "type": "integer" + }, + "lifecycle": { + "type": "object" + }, + "readinessProbe": { + "type": "object" + }, + "resources": { + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "drop": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "lifecycle": { + "type": "object" + }, + "metrics": { + "properties": { + "enabled": { + "type": "boolean" + }, + "etcd": { + "properties": { + "enabled": { + "type": "boolean" + }, + "mode": { + "type": "string" + }, + "port": { + "type": "integer" + } + }, + "type": "object" + }, + "kvstoremesh": { + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + } + }, + "type": "object" + }, + "port": { + "type": "integer" + }, + "serviceMonitor": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "etcd": { + "properties": { + "interval": { + "type": "string" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "type": [ + "null", + "array" + ] + } + }, + "type": "object" + }, + "interval": { + "type": "string" + }, + "kvstoremesh": { + "properties": { + "interval": { + "type": "string" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "type": [ + 
"null", + "array" + ] + } + }, + "type": "object" + }, + "labels": { + "type": "object" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "type": [ + "null", + "array" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "null", + "integer", + "string" + ] + }, + "minAvailable": { + "type": [ + "null", + "integer", + "string" + ] + } + }, + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podSecurityContext": { + "properties": { + "fsGroup": { + "type": "integer" + }, + "runAsGroup": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "type": "integer" + } + }, + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "readinessProbe": { + "type": "object" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "securityContext": { + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + }, + "capabilities": { + "properties": { + "drop": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "service": { + "properties": { + "annotations": { + "type": "object" + }, + "enableSessionAffinity": { + "enum": [ + "HAOnly", + "Always", + "Never" + ] + }, + "externalTrafficPolicy": { + "enum": [ + "Local", + "Cluster" + ] + }, + "internalTrafficPolicy": { + "enum": [ + "Local", + "Cluster" + ] + }, + "loadBalancerClass": { + "type": [ + "null", + "string" + ] + }, + "loadBalancerIP": { + "type": [ + "null", + "string" + ] + }, + "nodePort": { + "type": "integer" + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + 
"terminationGracePeriodSeconds": { + "type": "integer" + }, + "tls": { + "properties": { + "admin": { + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "type": "object" + }, + "authMode": { + "type": "string" + }, + "auto": { + "properties": { + "certManagerIssuerRef": { + "type": "object" + }, + "certValidityDuration": { + "type": "integer" + }, + "enabled": { + "type": "boolean" + }, + "method": { + "type": "string" + } + }, + "type": "object" + }, + "client": { + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "type": "object" + }, + "enableSecrets": { + "type": "boolean" + }, + "remote": { + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "type": "object" + }, + "server": { + "properties": { + "cert": { + "type": "string" + }, + "extraDnsNames": { + "items": {}, + "type": "array" + }, + "extraIpAddresses": { + "items": {}, + "type": "array" + }, + "key": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "tolerations": { + "items": {}, + "type": "array" + }, + "topologySpreadConstraints": { + "items": {}, + "type": "array" + }, + "updateStrategy": { + "properties": { + "rollingUpdate": { + "properties": { + "maxSurge": { + "type": [ + "integer", + "string" + ] + }, + "maxUnavailable": { + "type": [ + "integer", + "string" + ] + } + }, + "type": "object" + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "config": { + "properties": { + "clusters": { + "items": {}, + "type": "array" + }, + "domain": { + "type": "string" + }, + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "enableEndpointSliceSynchronization": { + "type": "boolean" + }, + "enableMCSAPISupport": { + "type": "boolean" + }, + "maxConnectedClusters": { + "type": "integer" + }, + "useAPIServer": { + "type": "boolean" + } + }, + "type": "object" + }, + "cni": { + 
"properties": { + "binPath": { + "type": "string" + }, + "chainingMode": { + "type": [ + "null", + "string" + ] + }, + "chainingTarget": { + "type": [ + "null", + "string" + ] + }, + "confFileMountPath": { + "type": "string" + }, + "confPath": { + "type": "string" + }, + "configMapKey": { + "type": "string" + }, + "customConf": { + "type": "boolean" + }, + "enableRouteMTUForCNIChaining": { + "type": "boolean" + }, + "exclusive": { + "type": "boolean" + }, + "hostConfDirMountPath": { + "type": "string" + }, + "install": { + "type": "boolean" + }, + "logFile": { + "type": "string" + }, + "resources": { + "properties": { + "requests": { + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "uninstall": { + "type": "boolean" + } + }, + "type": "object" + }, + "conntrackGCInterval": { + "type": "string" + }, + "conntrackGCMaxInterval": { + "type": "string" + }, + "crdWaitTimeout": { + "type": "string" + }, + "customCalls": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "daemon": { + "properties": { + "allowedConfigOverrides": { + "type": [ + "null", + "string" + ] + }, + "blockedConfigOverrides": { + "type": [ + "null", + "string" + ] + }, + "configSources": { + "type": [ + "null", + "string" + ] + }, + "runPath": { + "type": "string" + } + }, + "type": "object" + }, + "dashboards": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "label": { + "type": "string" + }, + "labelValue": { + "type": "string" + }, + "namespace": { + "type": [ + "null", + "string" + ] + } + }, + "type": "object" + }, + "debug": { + "properties": { + "enabled": { + "type": "boolean" + }, + "verbose": { + "type": [ + "null", + "string" + ] + } + }, + "type": "object" + }, + "directRoutingSkipUnreachable": { + "type": "boolean" + }, + "disableEndpointCRD": { + "type": "boolean" + }, + "dnsPolicy": { + 
"type": "string" + }, + "dnsProxy": { + "properties": { + "dnsRejectResponseCode": { + "type": "string" + }, + "enableDnsCompression": { + "type": "boolean" + }, + "endpointMaxIpPerHostname": { + "type": "integer" + }, + "idleConnectionGracePeriod": { + "type": "string" + }, + "maxDeferredConnectionDeletes": { + "type": "integer" + }, + "minTtl": { + "type": "integer" + }, + "preCache": { + "type": "string" + }, + "proxyPort": { + "type": "integer" + }, + "proxyResponseMaxDelay": { + "type": "string" + }, + "socketLingerTimeout": { + "type": "integer" + } + }, + "type": "object" + }, + "egressGateway": { + "properties": { + "enabled": { + "type": "boolean" + }, + "reconciliationTriggerInterval": { + "type": "string" + } + }, + "type": "object" + }, + "enableCiliumEndpointSlice": { + "type": "boolean" + }, + "enableCriticalPriorityClass": { + "type": "boolean" + }, + "enableIPv4BIGTCP": { + "type": "boolean" + }, + "enableIPv4Masquerade": { + "type": "boolean" + }, + "enableIPv6BIGTCP": { + "type": "boolean" + }, + "enableIPv6Masquerade": { + "type": "boolean" + }, + "enableK8sTerminatingEndpoint": { + "type": "boolean" + }, + "enableMasqueradeRouteSource": { + "type": "boolean" + }, + "enableRuntimeDeviceDetection": { + "type": "boolean" + }, + "enableXTSocketFallback": { + "type": "boolean" + }, + "encryption": { + "properties": { + "enabled": { + "type": "boolean" + }, + "ipsec": { + "properties": { + "encryptedOverlay": { + "type": "boolean" + }, + "interface": { + "type": "string" + }, + "keyFile": { + "type": "string" + }, + "keyRotationDuration": { + "type": "string" + }, + "keyWatcher": { + "type": "boolean" + }, + "mountPath": { + "type": "string" + }, + "secretName": { + "type": "string" + } + }, + "type": "object" + }, + "nodeEncryption": { + "type": "boolean" + }, + "strictMode": { + "properties": { + "allowRemoteNodeIdentities": { + "type": "boolean" + }, + "cidr": { + "type": "string" + }, + "enabled": { + "type": "boolean" + } + }, + "type": "object" 
+ }, + "type": { + "type": "string" + }, + "wireguard": { + "properties": { + "persistentKeepalive": { + "type": "string" + }, + "userspaceFallback": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "endpointHealthChecking": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "endpointRoutes": { + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + } + }, + "type": "object" + }, + "eni": { + "properties": { + "awsEnablePrefixDelegation": { + "type": "boolean" + }, + "awsReleaseExcessIPs": { + "type": "boolean" + }, + "ec2APIEndpoint": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "eniTags": { + "type": "object" + }, + "gcInterval": { + "type": "string" + }, + "gcTags": { + "type": "object" + }, + "iamRole": { + "type": "string" + }, + "instanceTagsFilter": { + "items": {}, + "type": "array" + }, + "subnetIDsFilter": { + "items": {}, + "type": "array" + }, + "subnetTagsFilter": { + "items": {}, + "type": "array" + }, + "updateEC2AdapterLimitViaAPI": { + "type": "boolean" + } + }, + "type": "object" + }, + "envoy": { + "properties": { + "affinity": { + "properties": { + "nodeAffinity": { + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "properties": { + "nodeSelectorTerms": { + "items": { + "anyOf": [ + { + "properties": { + "matchExpressions": { + "items": { + "anyOf": [ + { + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + } + } + } + ] + }, + "type": "array" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "podAffinity": { + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "anyOf": [ + { + "properties": { + "labelSelector": { + "properties": { + "matchLabels": { + "properties": { + "k8s-app": { + 
"type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "topologyKey": { + "type": "string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "anyOf": [ + { + "properties": { + "labelSelector": { + "properties": { + "matchLabels": { + "properties": { + "k8s-app": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "topologyKey": { + "type": "string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "annotations": { + "type": "object" + }, + "baseID": { + "type": "integer" + }, + "connectTimeoutSeconds": { + "type": "integer" + }, + "debug": { + "properties": { + "admin": { + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "dnsPolicy": { + "type": [ + "null", + "string" + ] + }, + "enabled": { + "type": [ + "null", + "boolean" + ] + }, + "extraArgs": { + "items": {}, + "type": "array" + }, + "extraContainers": { + "items": {}, + "type": "array" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraHostPathMounts": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "healthPort": { + "type": "integer" + }, + "idleTimeoutDurationSeconds": { + "type": "integer" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + } + }, 
+ "type": "object" + }, + "log": { + "properties": { + "format": { + "type": "string" + }, + "path": { + "type": "string" + } + }, + "type": "object" + }, + "maxConnectionDurationSeconds": { + "type": "integer" + }, + "maxRequestsPerConnection": { + "type": "integer" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "podAnnotations": { + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podSecurityContext": { + "properties": { + "appArmorProfile": { + "properties": { + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "priorityClassName": { + "type": [ + "null", + "string" + ] + }, + "prometheus": { + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": "string" + }, + "serviceMonitor": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "interval": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "items": { + "anyOf": [ + { + "properties": { + "replacement": { + "type": "string" + }, + "sourceLabels": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + }, + "targetLabel": { + "type": "string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "readinessProbe": { + "properties": { + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "type": "object" + }, + "rollOutPods": { + "type": "boolean" + }, + "securityContext": { + "properties": { + "capabilities": { + "properties": { + "envoy": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + "keepCapNetBindService": { + "type": "boolean" + } + }, + "type": "object" + }, + 
"privileged": { + "type": "boolean" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + } + }, + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "tolerations": { + "items": { + "anyOf": [ + { + "properties": { + "operator": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "updateStrategy": { + "properties": { + "rollingUpdate": { + "properties": { + "maxUnavailable": { + "type": [ + "integer", + "string" + ] + } + }, + "type": "object" + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + "xffNumTrustedHopsL7PolicyEgress": { + "type": "integer" + }, + "xffNumTrustedHopsL7PolicyIngress": { + "type": "integer" + } + }, + "type": "object" + }, + "envoyConfig": { + "properties": { + "enabled": { + "type": "boolean" + }, + "retryInterval": { + "type": "string" + }, + "secretsNamespace": { + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "etcd": { + "properties": { + "enabled": { + "type": "boolean" + }, + "endpoints": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + }, + "ssl": { + "type": "boolean" + } + }, + "type": "object" + }, + "externalIPs": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "externalWorkloads": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "extraArgs": { + "items": {}, + "type": "array" + }, + "extraConfig": { + "type": "object" + }, + "extraContainers": { + "items": {}, + "type": "array" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraHostPathMounts": { + "items": {}, + "type": "array" + }, 
+ "extraInitContainers": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "forceDeviceDetection": { + "type": "boolean" + }, + "gatewayAPI": { + "properties": { + "enableAlpn": { + "type": "boolean" + }, + "enableAppProtocol": { + "type": "boolean" + }, + "enableProxyProtocol": { + "type": "boolean" + }, + "enabled": { + "type": "boolean" + }, + "externalTrafficPolicy": { + "type": "string" + }, + "gatewayClass": { + "properties": { + "create": { + "type": "string" + } + }, + "type": "object" + }, + "hostNetwork": { + "properties": { + "enabled": { + "type": "boolean" + }, + "nodes": { + "properties": { + "matchLabels": { + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "secretsNamespace": { + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "sync": { + "type": "boolean" + } + }, + "type": "object" + }, + "xffNumTrustedHops": { + "type": "integer" + } + }, + "type": "object" + }, + "gke": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "global": { + "type": "object" + }, + "healthChecking": { + "type": "boolean" + }, + "healthPort": { + "type": "integer" + }, + "highScaleIPcache": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "hostFirewall": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "hostPort": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "hubble": { + "properties": { + "annotations": { + "type": "object" + }, + "dropEventEmitter": { + "properties": { + "enabled": { + "type": "boolean" + }, + "interval": { + "type": "string" + }, + "reasons": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + } + }, + "type": "object" + }, + "enabled": { + 
"type": "boolean" + }, + "export": { + "properties": { + "dynamic": { + "properties": { + "config": { + "properties": { + "configMapName": { + "type": "string" + }, + "content": { + "items": { + "anyOf": [ + { + "properties": { + "excludeFilters": { + "items": {}, + "type": "array" + }, + "fieldMask": { + "items": {}, + "type": "array" + }, + "filePath": { + "type": "string" + }, + "includeFilters": { + "items": {}, + "type": "array" + }, + "name": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "createConfigMap": { + "type": "boolean" + } + }, + "type": "object" + }, + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "fileMaxBackups": { + "type": "integer" + }, + "fileMaxSizeMb": { + "type": "integer" + }, + "static": { + "properties": { + "allowList": { + "items": {}, + "type": "array" + }, + "denyList": { + "items": {}, + "type": "array" + }, + "enabled": { + "type": "boolean" + }, + "fieldMask": { + "items": {}, + "type": "array" + }, + "filePath": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "listenAddress": { + "type": "string" + }, + "metrics": { + "properties": { + "dashboards": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "label": { + "type": "string" + }, + "labelValue": { + "type": "string" + }, + "namespace": { + "type": [ + "null", + "string" + ] + } + }, + "type": "object" + }, + "enableOpenMetrics": { + "type": "boolean" + }, + "enabled": { + "type": [ + "null", + "array" + ] + }, + "port": { + "type": "integer" + }, + "serviceAnnotations": { + "type": "object" + }, + "serviceMonitor": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "interval": { + "type": "string" + }, + "jobLabel": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "items": { + "anyOf": [ + { 
+ "properties": { + "replacement": { + "type": "string" + }, + "sourceLabels": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + }, + "targetLabel": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "tlsConfig": { + "type": "object" + } + }, + "type": "object" + }, + "tls": { + "properties": { + "enabled": { + "type": "boolean" + }, + "server": { + "properties": { + "cert": { + "type": "string" + }, + "extraDnsNames": { + "items": {}, + "type": "array" + }, + "extraIpAddresses": { + "items": {}, + "type": "array" + }, + "key": { + "type": "string" + }, + "mtls": { + "properties": { + "enabled": { + "type": "boolean" + }, + "key": { + "type": "string" + }, + "name": { + "type": "null" + }, + "useSecret": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "peerService": { + "properties": { + "clusterDomain": { + "type": "string" + }, + "targetPort": { + "type": "integer" + } + }, + "type": "object" + }, + "preferIpv6": { + "type": "boolean" + }, + "redact": { + "properties": { + "enabled": { + "type": "boolean" + }, + "http": { + "properties": { + "headers": { + "properties": { + "allow": { + "items": {}, + "type": "array" + }, + "deny": { + "items": {}, + "type": "array" + } + }, + "type": "object" + }, + "urlQuery": { + "type": "boolean" + }, + "userInfo": { + "type": "boolean" + } + }, + "type": "object" + }, + "kafka": { + "properties": { + "apiKey": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "relay": { + "properties": { + "affinity": { + "properties": { + "podAffinity": { + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "anyOf": [ + { + "properties": { + "labelSelector": { + "properties": { + "matchLabels": { + "properties": { + "k8s-app": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "topologyKey": { + 
"type": "string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "annotations": { + "type": "object" + }, + "dialTimeout": { + "type": [ + "null", + "string" + ] + }, + "enabled": { + "type": "boolean" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "gops": { + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + } + }, + "type": "object" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "listenHost": { + "type": "string" + }, + "listenPort": { + "type": "string" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "null", + "integer", + "string" + ] + }, + "minAvailable": { + "type": [ + "null", + "integer", + "string" + ] + } + }, + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podSecurityContext": { + "properties": { + "fsGroup": { + "type": "integer" + } + }, + "type": "object" + }, + "pprof": { + "properties": { + "address": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + } + }, + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheus": { + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + }, + "serviceMonitor": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": 
"boolean" + }, + "interval": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "type": [ + "null", + "array" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "retryTimeout": { + "type": [ + "null", + "string" + ] + }, + "rollOutPods": { + "type": "boolean" + }, + "securityContext": { + "properties": { + "capabilities": { + "properties": { + "drop": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + } + }, + "type": "object" + }, + "runAsGroup": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "type": "integer" + } + }, + "type": "object" + }, + "service": { + "properties": { + "nodePort": { + "type": "integer" + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + "sortBufferDrainTimeout": { + "type": [ + "null", + "string" + ] + }, + "sortBufferLenMax": { + "type": [ + "null", + "integer" + ] + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "tls": { + "properties": { + "client": { + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "type": "object" + }, + "server": { + "properties": { + "cert": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "extraDnsNames": { + "items": {}, + "type": "array" + }, + "extraIpAddresses": { + "items": {}, + "type": "array" + }, + "key": { + "type": "string" + }, + "mtls": { + "type": "boolean" + }, + "relayName": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "tolerations": { + "items": {}, + "type": "array" + }, + "topologySpreadConstraints": { + "items": {}, + "type": "array" + }, + "updateStrategy": { + "properties": { + "rollingUpdate": { + "properties": { + "maxUnavailable": { + "type": [ + "integer", + "string" + ] + } + }, 
+ "type": "object" + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "skipUnknownCGroupIDs": { + "type": [ + "null", + "boolean" + ] + }, + "socketPath": { + "type": "string" + }, + "tls": { + "properties": { + "auto": { + "properties": { + "certManagerIssuerRef": { + "type": "object" + }, + "certValidityDuration": { + "type": "integer" + }, + "enabled": { + "type": "boolean" + }, + "method": { + "type": "string" + }, + "schedule": { + "type": "string" + } + }, + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "server": { + "properties": { + "cert": { + "type": "string" + }, + "extraDnsNames": { + "items": {}, + "type": "array" + }, + "extraIpAddresses": { + "items": {}, + "type": "array" + }, + "key": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ui": { + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "backend": { + "properties": { + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "livenessProbe": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "readinessProbe": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "resources": { + "type": "object" + }, + "securityContext": { + "type": "object" + } + }, + "type": "object" + }, + "baseUrl": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "frontend": { + "properties": { + "extraEnv": { + "items": {}, + "type": "array" + }, + 
"extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "resources": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "server": { + "properties": { + "ipv6": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingress": { + "properties": { + "annotations": { + "type": "object" + }, + "className": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "hosts": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + }, + "labels": { + "type": "object" + }, + "tls": { + "items": {}, + "type": "array" + } + }, + "type": "object" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "null", + "integer", + "string" + ] + }, + "minAvailable": { + "type": [ + "null", + "integer", + "string" + ] + } + }, + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "replicas": { + "type": "integer" + }, + "rollOutPods": { + "type": "boolean" + }, + "securityContext": { + "properties": { + "fsGroup": { + "type": "integer" + }, + "runAsGroup": { + "type": "integer" + }, + "runAsUser": { + "type": "integer" + } + }, + "type": "object" + }, + "service": { + "properties": { + "annotations": { + "type": "object" + }, + "nodePort": { + "type": "integer" + }, + "type": 
{ + "type": "string" + } + }, + "type": "object" + }, + "standalone": { + "properties": { + "enabled": { + "type": "boolean" + }, + "tls": { + "properties": { + "certsVolume": { + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "tls": { + "properties": { + "client": { + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "tolerations": { + "items": {}, + "type": "array" + }, + "topologySpreadConstraints": { + "items": {}, + "type": "array" + }, + "updateStrategy": { + "properties": { + "rollingUpdate": { + "properties": { + "maxUnavailable": { + "type": [ + "integer", + "string" + ] + } + }, + "type": "object" + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "identityAllocationMode": { + "type": "string" + }, + "identityChangeGracePeriod": { + "type": "string" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "imagePullSecrets": { + "items": {}, + "type": "array" + }, + "ingressController": { + "properties": { + "default": { + "type": "boolean" + }, + "defaultSecretName": { + "type": [ + "null", + "string" + ] + }, + "defaultSecretNamespace": { + "type": [ + "null", + "string" + ] + }, + "enableProxyProtocol": { + "type": "boolean" + }, + "enabled": { + "type": "boolean" + }, + "enforceHttps": { + "type": "boolean" + }, + "hostNetwork": { + "properties": { + "enabled": { + "type": "boolean" + }, + "nodes": { + "properties": { + "matchLabels": { + "type": "object" + } + }, + "type": "object" + }, + "sharedListenerPort": { + "type": "integer" + } + }, + "type": "object" + }, + 
"ingressLBAnnotationPrefixes": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + "loadbalancerMode": { + "type": "string" + }, + "secretsNamespace": { + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "sync": { + "type": "boolean" + } + }, + "type": "object" + }, + "service": { + "properties": { + "allocateLoadBalancerNodePorts": { + "type": [ + "null", + "boolean" + ] + }, + "annotations": { + "type": "object" + }, + "externalTrafficPolicy": { + "type": "string" + }, + "insecureNodePort": { + "type": [ + "null", + "integer" + ] + }, + "labels": { + "type": "object" + }, + "loadBalancerClass": { + "type": [ + "null", + "string" + ] + }, + "loadBalancerIP": { + "type": [ + "null", + "string" + ] + }, + "name": { + "type": "string" + }, + "secureNodePort": { + "type": [ + "null", + "integer" + ] + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "initResources": { + "type": "object" + }, + "installNoConntrackIptablesRules": { + "type": "boolean" + }, + "ipMasqAgent": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "ipam": { + "properties": { + "ciliumNodeUpdateRate": { + "type": "string" + }, + "mode": { + "type": "string" + }, + "operator": { + "properties": { + "autoCreateCiliumPodIPPools": { + "type": "object" + }, + "clusterPoolIPv4MaskSize": { + "type": "integer" + }, + "clusterPoolIPv4PodCIDRList": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": [ + "array", + "string" + ] + }, + "clusterPoolIPv6MaskSize": { + "type": "integer" + }, + "clusterPoolIPv6PodCIDRList": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": [ + "array", + "string" + ] + }, + "externalAPILimitBurstSize": { + "type": [ + "null", + "integer" + ] + }, + 
"externalAPILimitQPS": { + "type": [ + "null", + "number" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ipv4": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "ipv4NativeRoutingCIDR": { + "type": "string" + }, + "ipv6": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "ipv6NativeRoutingCIDR": { + "type": "string" + }, + "k8s": { + "properties": { + "requireIPv4PodCIDR": { + "type": "boolean" + }, + "requireIPv6PodCIDR": { + "type": "boolean" + } + }, + "type": "object" + }, + "k8sClientRateLimit": { + "properties": { + "burst": { + "type": [ + "null", + "integer" + ] + }, + "qps": { + "type": [ + "null", + "integer" + ] + } + }, + "type": "object" + }, + "k8sNetworkPolicy": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "k8sServiceHost": { + "type": "string" + }, + "k8sServicePort": { + "type": [ + "string", + "integer" + ] + }, + "keepDeprecatedLabels": { + "type": "boolean" + }, + "keepDeprecatedProbes": { + "type": "boolean" + }, + "kubeConfigPath": { + "type": "string" + }, + "kubeProxyReplacementHealthzBindAddr": { + "type": "string" + }, + "l2NeighDiscovery": { + "properties": { + "enabled": { + "type": "boolean" + }, + "refreshPeriod": { + "type": "string" + } + }, + "type": "object" + }, + "l2announcements": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "l2podAnnouncements": { + "properties": { + "enabled": { + "type": "boolean" + }, + "interface": { + "type": "string" + } + }, + "type": "object" + }, + "l7Proxy": { + "type": "boolean" + }, + "livenessProbe": { + "properties": { + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + } + }, + "type": "object" + }, + "loadBalancer": { + "properties": { + "acceleration": { + "type": "string" + }, + "l7": { + "properties": { + "algorithm": { + "type": "string" + }, + "backend": { + 
"type": "string" + }, + "ports": { + "items": {}, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "localRedirectPolicy": { + "type": "boolean" + }, + "logSystemLoad": { + "type": "boolean" + }, + "maglev": { + "type": "object" + }, + "monitor": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "name": { + "type": "string" + }, + "nat46x64Gateway": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "nodeIPAM": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "nodePort": { + "properties": { + "addresses": { + "type": [ + "null", + "string", + "array" + ] + }, + "autoProtectPortRange": { + "type": "boolean" + }, + "bindProtection": { + "type": "boolean" + }, + "enableHealthCheck": { + "type": "boolean" + }, + "enableHealthCheckLoadBalancerIP": { + "type": "boolean" + }, + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "nodeSelectorLabels": { + "type": "boolean" + }, + "nodeinit": { + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "bootstrapFile": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "podAnnotations": { + 
"type": "object" + }, + "podLabels": { + "type": "object" + }, + "podSecurityContext": { + "properties": { + "appArmorProfile": { + "properties": { + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "prestop": { + "properties": { + "postScript": { + "type": "string" + }, + "preScript": { + "type": "string" + } + }, + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "resources": { + "properties": { + "requests": { + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "capabilities": { + "properties": { + "add": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + } + }, + "type": "object" + }, + "privileged": { + "type": "boolean" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "startup": { + "properties": { + "postScript": { + "type": "string" + }, + "preScript": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "items": { + "anyOf": [ + { + "properties": { + "operator": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "updateStrategy": { + "properties": { + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "operator": { + "properties": { + "affinity": { + "properties": { + "podAntiAffinity": { + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "anyOf": [ + { + "properties": { + "labelSelector": { + "properties": { + "matchLabels": { + "properties": { + "io.cilium/app": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "topologyKey": { + "type": 
"string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "annotations": { + "type": "object" + }, + "dashboards": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "label": { + "type": "string" + }, + "labelValue": { + "type": "string" + }, + "namespace": { + "type": [ + "null", + "string" + ] + } + }, + "type": "object" + }, + "dnsPolicy": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "endpointGCInterval": { + "type": "string" + }, + "extraArgs": { + "items": {}, + "type": "array" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraHostPathMounts": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "hostNetwork": { + "type": "boolean" + }, + "identityGCInterval": { + "type": "string" + }, + "identityHeartbeatTimeout": { + "type": "string" + }, + "image": { + "properties": { + "alibabacloudDigest": { + "type": "string" + }, + "awsDigest": { + "type": "string" + }, + "azureDigest": { + "type": "string" + }, + "genericDigest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "suffix": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "nodeGCInterval": { + "type": "string" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "null", + "integer", + "string" + ] + }, + "minAvailable": { + "type": [ + "null", + "integer", + "string" + ] + } + }, + "type": "object" + }, + "podLabels": { + 
"type": "object" + }, + "podSecurityContext": { + "type": "object" + }, + "pprof": { + "properties": { + "address": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + } + }, + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheus": { + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + }, + "serviceMonitor": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "interval": { + "type": "string" + }, + "jobLabel": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "type": [ + "null", + "array" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "removeNodeTaints": { + "type": "boolean" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "rollOutPods": { + "type": "boolean" + }, + "securityContext": { + "type": "object" + }, + "setNodeNetworkStatus": { + "type": "boolean" + }, + "setNodeTaints": { + "type": [ + "null", + "boolean" + ] + }, + "skipCRDCreation": { + "type": "boolean" + }, + "tolerations": { + "items": { + "anyOf": [ + { + "properties": { + "operator": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "topologySpreadConstraints": { + "items": {}, + "type": "array" + }, + "unmanagedPodWatcher": { + "properties": { + "intervalSeconds": { + "type": "integer" + }, + "restart": { + "type": "boolean" + } + }, + "type": "object" + }, + "updateStrategy": { + "properties": { + "rollingUpdate": { + "properties": { + "maxSurge": { + "type": [ + "integer", + "string" + ] + }, + "maxUnavailable": { + "type": [ + "integer", + "string" + ] + } + }, + "type": "object" + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "pmtuDiscovery": { + "properties": { + "enabled": { + "type": "boolean" + 
} + }, + "type": "object" + }, + "podAnnotations": { + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podSecurityContext": { + "properties": { + "appArmorProfile": { + "properties": { + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "policyCIDRMatchMode": { + "type": [ + "null", + "string", + "array" + ] + }, + "policyEnforcementMode": { + "type": "string" + }, + "pprof": { + "properties": { + "address": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + } + }, + "type": "object" + }, + "preflight": { + "properties": { + "affinity": { + "properties": { + "podAffinity": { + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "items": { + "anyOf": [ + { + "properties": { + "labelSelector": { + "properties": { + "matchLabels": { + "properties": { + "k8s-app": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "topologyKey": { + "type": "string" + } + } + } + ] + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "extraEnv": { + "items": {}, + "type": "array" + }, + "extraVolumeMounts": { + "items": {}, + "type": "array" + }, + "extraVolumes": { + "items": {}, + "type": "array" + }, + "image": { + "properties": { + "digest": { + "type": "string" + }, + "override": { + "type": [ + "null", + "string" + ] + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "useDigest": { + "type": "boolean" + } + }, + "type": "object" + }, + "nodeSelector": { + "properties": { + "kubernetes.io/os": { + "type": "string" + } + }, + "type": "object" + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "null", + "integer", + 
"string" + ] + }, + "minAvailable": { + "type": [ + "null", + "integer", + "string" + ] + } + }, + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "podSecurityContext": { + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "readinessProbe": { + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + } + }, + "type": "object" + }, + "resources": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "tofqdnsPreCache": { + "type": "string" + }, + "tolerations": { + "items": { + "anyOf": [ + { + "properties": { + "operator": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "updateStrategy": { + "properties": { + "type": { + "type": "string" + } + }, + "type": "object" + }, + "validateCNPs": { + "type": "boolean" + } + }, + "type": "object" + }, + "priorityClassName": { + "type": "string" + }, + "prometheus": { + "properties": { + "controllerGroupMetrics": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + "enabled": { + "type": "boolean" + }, + "metrics": { + "type": [ + "null", + "array" + ] + }, + "port": { + "type": "integer" + }, + "serviceMonitor": { + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "interval": { + "type": "string" + }, + "jobLabel": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "metricRelabelings": { + "type": [ + "null", + "array" + ] + }, + "relabelings": { + "items": { + "anyOf": [ + { + "properties": { + "replacement": { + "type": "string" + }, + "sourceLabels": { + "items": { + "anyOf": [ + { + "type": "string" + } + ] + }, + "type": "array" + }, + "targetLabel": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "trustCRDsExist": { + "type": "boolean" + } + }, + 
"type": "object" + } + }, + "type": "object" + }, + "rbac": { + "properties": { + "create": { + "type": "boolean" + } + }, + "type": "object" + }, + "readinessProbe": { + "properties": { + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + } + }, + "type": "object" + }, + "resourceQuotas": { + "properties": { + "cilium": { + "properties": { + "hard": { + "properties": { + "pods": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "operator": { + "properties": { + "hard": { + "properties": { + "pods": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "resources": { + "type": "object" + }, + "rollOutCiliumPods": { + "type": "boolean" + }, + "routingMode": { + "type": "string" + }, + "sctp": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "securityContext": { + "properties": { + "capabilities": { + "properties": { + "applySysctlOverwrites": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + "ciliumAgent": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + "cleanCiliumState": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + }, + "mountCgroup": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + }, + "type": "array" + } + }, + "type": "object" + 
}, + "privileged": { + "type": "boolean" + }, + "seLinuxOptions": { + "properties": { + "level": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "serviceAccounts": { + "properties": { + "cilium": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "clustermeshApiserver": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "clustermeshcertgen": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "envoy": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "hubblecertgen": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "nodeinit": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "operator": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "preflight": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + 
"create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "relay": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "ui": { + "properties": { + "annotations": { + "type": "object" + }, + "automount": { + "type": "boolean" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "serviceNoBackendResponse": { + "type": "string" + }, + "sleepAfterInit": { + "type": "boolean" + }, + "socketLB": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "startupProbe": { + "properties": { + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + } + }, + "type": "object" + }, + "svcSourceRangeCheck": { + "type": "boolean" + }, + "synchronizeK8sNodes": { + "type": "boolean" + }, + "sysctlfix": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "tls": { + "properties": { + "ca": { + "properties": { + "cert": { + "type": "string" + }, + "certValidityDuration": { + "type": "integer" + }, + "key": { + "type": "string" + } + }, + "type": "object" + }, + "caBundle": { + "properties": { + "enabled": { + "type": "boolean" + }, + "key": { + "type": "string" + }, + "name": { + "type": "string" + }, + "useSecret": { + "type": "boolean" + } + }, + "type": "object" + }, + "secretsBackend": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "items": { + "anyOf": [ + { + "properties": { + "operator": { + "type": "string" + } + } + } + ] + }, + "type": "array" + }, + "tunnelPort": { + "type": "integer" + }, + "tunnelProtocol": { + "type": "string" + }, + "updateStrategy": { + "properties": { + "rollingUpdate": { + 
"properties": { + "maxUnavailable": { + "type": [ + "integer", + "string" + ] + } + }, + "type": "object" + }, + "type": { + "type": "string" + } + }, + "type": "object" + }, + "upgradeCompatibility": { + "type": [ + "null", + "string" + ] + }, + "vtep": { + "properties": { + "cidr": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "endpoint": { + "type": "string" + }, + "mac": { + "type": "string" + }, + "mask": { + "type": "string" + } + }, + "type": "object" + }, + "waitForKubeProxy": { + "type": "boolean" + }, + "wellKnownIdentities": { + "properties": { + "enabled": { + "type": "boolean" + } + }, + "type": "object" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object" +} \ No newline at end of file diff --git a/argocd-helm-charts/cilium/charts/cilium/values.yaml b/argocd-helm-charts/cilium/charts/cilium/values.yaml index 9603d4040..b74b64a5d 100644 --- a/argocd-helm-charts/cilium/charts/cilium/values.yaml +++ b/argocd-helm-charts/cilium/charts/cilium/values.yaml @@ -2,15 +2,20 @@ # This file is based on install/kubernetes/cilium/*values.yaml.tmpl. -# upgradeCompatibility helps users upgrading to ensure that the configMap for +# @schema +# type: [null, string] +# @schema +# -- upgradeCompatibility helps users upgrading to ensure that the configMap for # Cilium will not change critical values to ensure continued operation # This flag is not required for new installations. -# For example: 1.7, 1.8, 1.9 -# upgradeCompatibility: '1.8' - +# For example: '1.7', '1.8', '1.9' +upgradeCompatibility: null debug: # -- Enable debug logging enabled: false + # @schema + # type: [null, string] + # @schema # -- Configure verbosity levels for debug logging # This option is used to enable debug messages for operations related to such # sub-system such as (e.g. 
kvstore, envoy, datapath or policy), and flow is @@ -24,45 +29,54 @@ debug: # - datapath # - policy verbose: ~ - rbac: # -- Enable creation of Resource-Based Access Control configuration. create: true - # -- Configure image pull secrets for pulling container images -imagePullSecrets: +imagePullSecrets: [] # - name: "image-pull-secret" # -- (string) Kubernetes config path # @default -- `"~/.kube/config"` kubeConfigPath: "" -# -- (string) Kubernetes service host +# -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only) k8sServiceHost: "" +# @schema +# type: [string, integer] +# @schema # -- (string) Kubernetes service port k8sServicePort: "" - # -- Configure the client side rate limit for the agent and operator # # If the amount of requests to the Kubernetes API server exceeds the configured # rate limit, the agent and operator will start to throttle requests by delaying # them until there is budget or the request times out. k8sClientRateLimit: + # @schema + # type: [null, integer] + # @schema # -- (int) The sustained request rate in requests per second. # @default -- 5 for k8s up to 1.26. 10 for k8s version 1.27+ qps: + # @schema + # type: [null, integer] + # @schema # -- (int) The burst request rate in requests per second. # The rate limiter will allow short bursts with a higher rate. # @default -- 10 for k8s up to 1.26. 20 for k8s version 1.27+ burst: - cluster: # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. + # It must respect the following constraints: + # * It must contain at most 32 characters; + # * It must begin and end with a lower case alphanumeric character; + # * It may contain lower case alphanumeric characters and dashes between. + # The "default" name cannot be used if the Cluster ID is different from 0. name: default # -- (int) Unique ID of the cluster. 
Must be unique across all connected # clusters and in the range of 1 to 255. Only required for Cluster Mesh, # may be 0 if Cluster Mesh is not used. id: 0 - # -- Define serviceAccount names for components. # @default -- Component's fully qualified name. serviceAccounts: @@ -87,11 +101,6 @@ serviceAccounts: name: cilium-envoy automount: true annotations: {} - etcd: - create: true - name: cilium-etcd-operator - automount: true - annotations: {} operator: create: true name: cilium-operator @@ -129,82 +138,71 @@ serviceAccounts: name: hubble-generate-certs automount: true annotations: {} - # -- Configure termination grace period for cilium-agent DaemonSet. terminationGracePeriodSeconds: 1 - # -- Install the cilium agent resources. agent: true - # -- Agent container name. name: cilium - # -- Roll out cilium agent pods automatically when configmap is updated. rollOutCiliumPods: false - # -- Agent container image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/cilium" - tag: "v1.15.6" + tag: "v1.16.0" pullPolicy: "IfNotPresent" # cilium-digest - digest: "sha256:6aa840986a3a9722cd967ef63248d675a87add7e1704740902d5d3162f0c0def" + digest: "sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058" useDigest: true - # -- Affinity for cilium-agent. affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium # -- Node selector for cilium-agent. 
nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for agent scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: -- operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- The priority class to use for cilium-agent. priorityClassName: "" - # -- DNS policy for Cilium agent pods. # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: "" - # -- Additional containers added to the cilium DaemonSet. extraContainers: [] - +# -- Additional initContainers added to the cilium Daemonset. +extraInitContainers: [] # -- Additional agent container arguments. extraArgs: [] - # -- Additional agent container environment variables. extraEnv: [] - # -- Additional agent hostPath mounts. extraHostPathMounts: [] - # - name: host-mnt-data - # mountPath: /host/mnt/data - # hostPath: /mnt/data - # hostPathType: Directory - # readOnly: true - # mountPropagation: HostToContainer +# - name: host-mnt-data +# mountPath: /host/mnt/data +# hostPath: /mnt/data +# hostPathType: Directory +# readOnly: true +# mountPropagation: HostToContainer # -- Additional agent volumes. extraVolumes: [] - # -- Additional agent volumeMounts. extraVolumeMounts: [] - # -- extraConfig allows you to specify additional configuration parameters to be # included in the cilium-config configmap. extraConfig: {} @@ -216,7 +214,6 @@ extraConfig: {} # -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent) annotations: {} - # -- Security Context for cilium-agent pods. 
podSecurityContext: # -- AppArmorProfile options for the `cilium-agent` and init containers @@ -224,23 +221,20 @@ podSecurityContext: type: "Unconfined" # -- Annotations to be added to agent pods podAnnotations: {} - # -- Labels to be added to agent pods podLabels: {} - # -- Agent resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi +# limits: +# cpu: 4000m +# memory: 4Gi +# requests: +# cpu: 100m +# memory: 512Mi # -- resources & limits for the agent init containers initResources: {} - securityContext: # -- User to run the pod with # runAsUser: 0 @@ -268,6 +262,7 @@ securityContext: - IPC_LOCK # Used in iptables. Consider removing once we are iptables-free - SYS_MODULE + # Needed to switch network namespaces (used for health endpoint, socket-LB). # We need it for now but might not need it for >= 5.11 specially # for the 'SYS_RESOURCE'. # In >= 5.8 there's already BPF and PERMON capabilities @@ -322,28 +317,31 @@ securityContext: # If available, SYS_ADMIN can be removed. #- PERFMON #- BPF - # -- Cilium agent update strategy updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 2 - # Configuration Values for cilium-agent - aksbyocni: # -- Enable AKS BYOCNI integration. # Note that this is incompatible with AKS clusters not created in BYOCNI mode: # use Azure integration (`azure.enabled`) instead. enabled: false - +# @schema +# type: [boolean, string] +# @schema # -- Enable installation of PodCIDR routes between worker # nodes if worker nodes share a common L2 network segment. autoDirectNodeRoutes: false - +# -- Enable skipping of PodCIDR routes between worker +# nodes if the worker nodes are in a different L2 network segment. +directRoutingSkipUnreachable: false # -- Annotate k8s node upon initialization with Cilium's metadata. 
annotateK8sNode: false - azure: # -- Enable Azure integration. # Note that this is incompatible with AKS clusters created in BYOCNI mode: use @@ -356,11 +354,9 @@ azure: # clientID: 00000000-0000-0000-0000-000000000000 # clientSecret: 00000000-0000-0000-0000-000000000000 # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000 - alibabacloud: # -- Enable AlibabaCloud ENI integration enabled: false - # -- Enable bandwidth manager to optimize TCP and UDP workloads and allow # for rate-limiting traffic from individual Pods with EDT (Earliest Departure # Time) through the "kubernetes.io/egress-bandwidth" Pod annotation. @@ -369,19 +365,16 @@ bandwidthManager: enabled: false # -- Activate BBR TCP congestion control for Pods bbr: false - # -- Configure standalone NAT46/NAT64 gateway nat46x64Gateway: # -- Enable RFC8215-prefixed translation enabled: false - # -- EnableHighScaleIPcache enables the special ipcache mode for high scale # clusters. The ipcache content will be reduced to the strict minimum and # traffic will be encapsulated to carry security identities. highScaleIPcache: # -- Enable the high scale mode for the ipcache. enabled: false - # -- Configure L2 announcements l2announcements: # -- Enable L2 announcements @@ -392,14 +385,12 @@ l2announcements: # leaseRenewDeadline: 5s # -- The timeout between retries if renewal fails # leaseRetryPeriod: 2s - # -- Configure L2 pod announcements l2podAnnouncements: # -- Enable L2 pod announcements enabled: false # -- Interface used for sending Gratuitous ARP pod announcements interface: "eth0" - # -- Configure BGP bgp: # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside @@ -410,7 +401,6 @@ bgp: loadbalancerIP: false # -- Enable announcement of node pod CIDR podCIDR: false - # -- This feature set enables virtual BGP routers to be created via # CiliumBGPPeeringPolicy CRDs. 
bgpControlPlane: @@ -422,12 +412,10 @@ bgpControlPlane: create: false # -- The name of the secret namespace to which Cilium agents are given read access name: kube-system - pmtuDiscovery: # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to # the client. enabled: false - bpf: autoMount: # -- Enable automatic mount of BPF filesystem @@ -439,122 +427,154 @@ bpf: enabled: true # -- Configure the mount point for the BPF filesystem root: /sys/fs/bpf - # -- Enables pre-allocation of eBPF map values. This increases # memory usage but can reduce latency. preallocateMaps: false - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries in auth map. # @default -- `524288` authMapMax: ~ - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries in the TCP connection tracking # table. # @default -- `524288` ctTcpMax: ~ - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries for the non-TCP connection # tracking table. # @default -- `262144` ctAnyMax: ~ - + # -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. + events: + drop: + # -- Enable drop events. + enabled: true + policyVerdict: + # -- Enable policy verdict events. + enabled: true + trace: + # -- Enable trace events. + enabled: true + # @schema + # type: [null, integer] + # @schema # -- Configure the maximum number of service entries in the # load balancer maps. lbMapMax: 65536 - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries for the NAT table. # @default -- `524288` natMax: ~ - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries for the neighbor table. # @default -- `524288` neighMax: ~ - # @schema # type: [null, integer] # @schema # @default -- `16384` # -- (int) Configures the maximum number of entries for the node table. 
nodeMapMax: ~ - # -- Configure the maximum number of entries in endpoint policy map (per endpoint). # @schema # type: [null, integer] # @schema policyMapMax: 16384 - + # @schema + # type: [null, number] + # @schema # -- (float64) Configure auto-sizing for all BPF maps based on available memory. # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/ # @default -- `0.0025` mapDynamicSizeRatio: ~ - # -- Configure the level of aggregation for monitor notifications. # Valid options are none, low, medium, maximum. monitorAggregation: medium - # -- Configure the typical time between monitor notifications for # active connections. monitorInterval: "5s" - # -- Configure which TCP flags trigger notifications when seen for the # first time in a connection. monitorFlags: "all" - # -- Allow cluster external access to ClusterIP services. lbExternalClusterIP: false - + # @schema + # type: [null, boolean] + # @schema # -- (bool) Enable native IP masquerade support in eBPF # @default -- `false` masquerade: ~ - + # @schema + # type: [null, boolean] + # @schema # -- (bool) Configure whether direct routing mode should route traffic via # host stack (true) or directly and more efficiently out of BPF (false) if # the kernel supports it. The latter has the implication that it will also # bypass netfilter in the host namespace. # @default -- `false` hostLegacyRouting: ~ - + # @schema + # type: [null, boolean] + # @schema # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules # for implementing Layer 7 policy. # @default -- `false` tproxy: ~ - + # @schema + # type: [null, array] + # @schema # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass. # [0] will allow all VLAN id's without any filtering. 
# @default -- `[]` vlanBypass: ~ - + # -- (bool) Disable ExternalIP mitigation (CVE-2020-8554) + # @default -- `false` + disableExternalIPMitigation: false + # -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on + # supported kernels. + # @default -- `true` + enableTCX: true + # -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) + # @default -- `veth` + datapathMode: veth # -- Enable BPF clock source probing for more efficient tick retrieval. bpfClockProbe: false - # -- Clean all eBPF datapath state from the initContainer of the cilium-agent # DaemonSet. # # WARNING: Use with care! cleanBpfState: false - # -- Clean all local Cilium state from the initContainer of the cilium-agent # DaemonSet. Implies cleanBpfState: true. # # WARNING: Use with care! cleanState: false - # -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy" # init container before launching cilium-agent. # More context can be found in the commit message of below PR # https://github.com/cilium/cilium/pull/20123 waitForKubeProxy: false - cni: # -- Install the CNI configuration and binary files into the filesystem. install: true - # -- Remove the CNI configuration and binary files on agent shutdown. Enable this # if you're removing Cilium from the cluster. Disable this to prevent the CNI # configuration file from being removed during agent upgrade, which can cause # nodes to go unmanageable. uninstall: false - + # @schema + # type: [null, string] + # @schema # -- Configure chaining on top of other CNI plugins. Possible values: # - none # - aws-cni @@ -562,34 +582,30 @@ cni: # - generic-veth # - portmap chainingMode: ~ - + # @schema + # type: [null, string] + # @schema # -- A CNI network name in to which the Cilium plugin should be added as a chained plugin. # This will cause the agent to watch for a CNI network with this network name. 
When it is # found, this will be used as the basis for Cilium's CNI configuration file. If this is # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode # of aws-cni implies a chainingTarget of aws-cni. chainingTarget: ~ - # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. # This ensures no Pods can be scheduled using other CNI plugins during Cilium # agent downtime. exclusive: true - # -- Configure the log file for CNI logging with retention policy of 7 days. # Disable CNI file logging by setting this field to empty explicitly. logFile: /var/run/cilium/cilium-cni.log - # -- Skip writing of the CNI configuration. This can be used if # writing of the CNI configuration is performed by external automation. customConf: false - # -- Configure the path to the CNI configuration directory on the host. confPath: /etc/cni/net.d - # -- Configure the path to the CNI binary directory on the host. binPath: /opt/cni/bin - # -- Specify the path to a CNI config to read from on agent start. # This can be useful if you want to manage your CNI # configuration outside of a Kubernetes environment. This parameter is @@ -605,59 +621,48 @@ cni: # -- Configure the key in the CNI ConfigMap to read the contents of # the CNI configuration from. configMapKey: cni-config - # -- Configure the path to where to mount the ConfigMap inside the agent pod. confFileMountPath: /tmp/cni-configuration - # -- Configure the path to where the CNI configuration directory is mounted # inside the agent pod. hostConfDirMountPath: /host/etc/cni/net.d - # -- Specifies the resources for the cni initContainer resources: requests: cpu: 100m memory: 10Mi - + # -- Enable route MTU for pod netns when CNI chaining is used + enableRouteMTUForCNIChaining: false # -- (string) Configure how frequently garbage collection should occur for the datapath # connection tracking table. 
# @default -- `"0s"` conntrackGCInterval: "" - # -- (string) Configure the maximum frequency for the garbage collection of the # connection tracking table. Only affects the automatic computation for the frequency # and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently # clean up unused identities created from ToFQDN policies. conntrackGCMaxInterval: "" - -# -- Configure container runtime specific integration. -# Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15. -containerRuntime: - # -- Enables specific integrations for container runtimes. - # Supported values: - # - crio - # - none - integration: none - # -- (string) Configure timeout in which Cilium will exit if CRDs are not available # @default -- `"5m"` crdWaitTimeout: "" - # -- Tail call hooks for custom eBPF programs. customCalls: # -- Enable tail call hooks for custom eBPF programs. enabled: false - daemon: # -- Configure where Cilium runtime state should be stored. runPath: "/var/run/cilium" - + # @schema + # type: [null, string] + # @schema # -- Configure a custom list of possible configuration override sources # The default is "config-map:cilium-config,cilium-node-config". For supported # values, see the help text for the build-config subcommand. # Note that this value should be a comma-separated string. configSources: ~ - + # @schema + # type: [null, string] + # @schema # -- allowedConfigOverrides is a list of config-map keys that can be overridden. # That is to say, if this value is set, config sources (excepting the first one) can # only override keys in this list. @@ -667,7 +672,9 @@ daemon: # By default, all keys may be overridden. To disable overrides, set this to "none" or # change the configSources variable. allowedConfigOverrides: ~ - + # @schema + # type: [null, string] + # @schema # -- blockedConfigOverrides is a list of config-map keys that may not be overridden. 
# In other words, if any of these keys appear in a configuration source excepting the # first one, they will be ignored @@ -676,7 +683,6 @@ daemon: # # By default, all keys may be overridden. blockedConfigOverrides: ~ - # -- Specify which network interfaces can run the eBPF datapath. This means # that a packet sent from a pod to a destination outside the cluster will be # masqueraded (to an output device IPv4 address), if the output device runs the @@ -688,8 +694,11 @@ daemon: # devices. When devices change the eBPF datapath is reloaded and services updated. # If "devices" is set then only those devices, or devices matching a wildcard will # be considered. -enableRuntimeDeviceDetection: false - +# +# This option has been deprecated and is a no-op. +enableRuntimeDeviceDetection: true +# -- Forces the auto-detection of devices, even if specific devices are explicitly listed +forceDeviceDetection: false # -- Chains to ignore when installing feeder rules. # disableIptablesFeederRules: "" @@ -702,65 +711,76 @@ enableRuntimeDeviceDetection: false # -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. # enableK8sEndpointSlice: true -# -- Enable CiliumEndpointSlice feature. +# -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead). enableCiliumEndpointSlice: false - +ciliumEndpointSlice: + # -- Enable Cilium EndpointSlice feature. + enabled: false + # -- List of rate limit options to be used for the CiliumEndpointSlice controller. + # Each object in the list must have the following fields: + # nodes: Count of nodes at which to apply the rate limit. + # limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50. + # burst: The burst request rate in requests per second. The maximum burst that can be configured is 100. 
+ rateLimits: + - nodes: 0 + limit: 10 + burst: 20 + - nodes: 100 + limit: 7 + burst: 15 + - nodes: 500 + limit: 5 + burst: 10 envoyConfig: # -- Enable CiliumEnvoyConfig CRD # CiliumEnvoyConfig CRD can also be implicitly enabled by other options. enabled: false - # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. secretsNamespace: # -- Create secrets namespace for CiliumEnvoyConfig CRDs. create: true - # -- The name of the secret namespace to which Cilium agents are given read access. name: cilium-secrets - + # -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated. + retryInterval: 15s ingressController: # -- Enable cilium ingress controller # This will automatically set enable-envoy-config as well. enabled: false - # -- Set cilium ingress controller to be the default ingress controller # This will let cilium ingress controller route entries without ingress class set default: false - # -- Default ingress load balancer mode # Supported values: shared, dedicated - # For granular control, use the following annotations on the ingress resource - # ingress.cilium.io/loadbalancer-mode: shared|dedicated, + # For granular control, use the following annotations on the ingress resource: + # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). loadbalancerMode: dedicated - # -- Enforce https for host having matching TLS host in Ingress. # Incoming traffic to http listener will return 308 http error code with respective location in header. enforceHttps: true - # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. 
enableProxyProtocol: false - # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service - ingressLBAnnotationPrefixes: ['service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com'] - + ingressLBAnnotationPrefixes: ['lbipam.cilium.io', 'nodeipam.cilium.io', 'service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com'] + # @schema + # type: [null, string] + # @schema # -- Default secret namespace for ingresses without .spec.tls[].secretName set. defaultSecretNamespace: - + # @schema + # type: [null, string] + # @schema # -- Default secret name for ingresses without .spec.tls[].secretName set. defaultSecretName: - # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. secretsNamespace: # -- Create secrets namespace for Ingress. create: true - # -- Name of Ingress secret namespace. name: cilium-secrets - # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. # If disabled, TLS secrets must be maintained externally. sync: true - # -- Load-balancer service in shared mode. # This is a single load-balancer service for all Ingress resources. 
service: @@ -772,130 +792,150 @@ ingressController: annotations: {} # -- Service type for the shared LB service type: LoadBalancer + # @schema + # type: [null, integer] + # @schema # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service insecureNodePort: ~ + # @schema + # type: [null, integer] + # @schema # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service - secureNodePort : ~ + secureNodePort: ~ + # @schema + # type: [null, string] + # @schema # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema # -- Configure a specific loadBalancerIP on the shared LB service - loadBalancerIP : ~ + loadBalancerIP: ~ + # @schema + # type: [null, boolean] + # @schema # -- Configure if node port allocation is required for LB service # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation allocateLoadBalancerNodePorts: ~ - + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. + # Valid values are "Cluster" and "Local". + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # -- Configure a specific port on the host network that gets used for the shared listener. 
+ sharedListenerPort: 8080 + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} gatewayAPI: # -- Enable support for Gateway API in cilium # This will automatically set enable-envoy-config as well. enabled: false - + # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. + enableProxyProtocol: false + # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. + enableAppProtocol: false + # -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. + # Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. + enableAlpn: false + # -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. + xffNumTrustedHops: 0 + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". + # Note that this value will be ignored when `hostNetwork.enabled == true`. + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + gatewayClass: + # -- Enable creation of GatewayClass resource + # The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. + # Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. + create: auto # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. 
secretsNamespace: # -- Create secrets namespace for Gateway API. create: true - # -- Name of Gateway API secret namespace. name: cilium-secrets - # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. # If disabled, TLS secrets must be maintained externally. sync: true - + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} # -- Enables the fallback compatibility solution for when the xt_socket kernel # module is missing and it is needed for the datapath L7 redirection to work # properly. See documentation for details on when this can be disabled: # https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. enableXTSocketFallback: true - encryption: # -- Enable transparent network encryption. enabled: false - # -- Encryption method. Can be either ipsec or wireguard. type: ipsec - # -- Enable encryption for pure node to node traffic. # This option is only effective when encryption.type is set to "wireguard". nodeEncryption: false - # -- Configure the WireGuard Pod2Pod strict mode. strictMode: # -- Enable WireGuard Pod2Pod strict mode. enabled: false - # -- CIDR for the WireGuard Pod2Pod strict mode. cidr: "" - # -- Allow dynamic lookup of remote node identities. # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. allowRemoteNodeIdentities: false - ipsec: # -- Name of the key file inside the Kubernetes secret configured via secretName. - keyFile: "" - + keyFile: keys # -- Path to mount the secret inside the Cilium pod. 
- mountPath: "" - + mountPath: /etc/ipsec # -- Name of the Kubernetes secret containing the encryption keys. - secretName: "" - + secretName: cilium-ipsec-keys # -- The interface to use for encrypted traffic. interface: "" - # -- Enable the key watcher. If disabled, a restart of the agent will be # necessary on key rotations. keyWatcher: true - # -- Maximum duration of the IPsec key rotation. The previous key will be # removed after that delay. keyRotationDuration: "5m" - + # -- Enable IPsec encrypted overlay + encryptedOverlay: false wireguard: - # -- Enables the fallback to the user-space implementation. + # -- Enables the fallback to the user-space implementation (deprecated). userspaceFallback: false - # -- Controls Wireguard PersistentKeepalive option. Set 0s to disable. + # -- Controls WireGuard PersistentKeepalive option. Set 0s to disable. persistentKeepalive: 0s - - # -- Deprecated in favor of encryption.ipsec.keyFile. To be removed in 1.15. - # Name of the key file inside the Kubernetes secret configured via secretName. - # This option is only effective when encryption.type is set to ipsec. - keyFile: keys - - # -- Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15. - # Path to mount the secret inside the Cilium pod. - # This option is only effective when encryption.type is set to ipsec. - mountPath: /etc/ipsec - - # -- Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15. - # Name of the Kubernetes secret containing the encryption keys. - # This option is only effective when encryption.type is set to ipsec. - secretName: cilium-ipsec-keys - - # -- Deprecated in favor of encryption.ipsec.interface. To be removed in 1.15. - # The interface to use for encrypted traffic. - # This option is only effective when encryption.type is set to ipsec. - interface: "" - endpointHealthChecking: # -- Enable connectivity health checking between virtual endpoints. enabled: true - -# -- Enable endpoint status. 
-# Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space. -endpointStatus: - enabled: false - status: "" - endpointRoutes: + # @schema + # type: [boolean, string] + # @schema # -- Enable use of per endpoint routes instead of routing via # the cilium_host interface. enabled: false - k8sNetworkPolicy: # -- Enable support for K8s NetworkPolicy enabled: true - eni: # -- Enable Elastic Network Interface (ENI) integration. enabled: false @@ -934,51 +974,46 @@ eni: # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances # are going to be used to create new ENIs instanceTagsFilter: [] - externalIPs: # -- Enable ExternalIPs service support. enabled: false - # fragmentTracking enables IPv4 fragment tracking support in the datapath. # fragmentTracking: true - gke: # -- Enable Google Kubernetes Engine integration enabled: false - # -- Enable connectivity health checking. healthChecking: true - # -- TCP port for the agent health API. This is not the port for cilium-health. healthPort: 9879 - # -- Configure the host firewall. hostFirewall: # -- Enables the enforcement of host policies in the eBPF datapath. enabled: false - hostPort: # -- Enable hostPort service support. enabled: false - # -- Configure socket LB socketLB: # -- Enable socket LB enabled: false - # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules. # hostNamespaceOnly: false - + # -- Enable terminating pod connections to deleted service backends. + # terminatePodConnections: true # -- Configure certificate generation for Hubble integration. # If hubble.tls.auto.method=cronJob, these values are used # for the Kubernetes CronJob which will be scheduled regularly to # (re)generate any certificates not provided manually. 
certgen: image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/certgen" - tag: "v0.1.12" - digest: "sha256:bbc5e65e9dc65bc6b58967fe536b7f3b54e12332908aeb0a96a36866b4372b4e" + tag: "v0.2.0" + digest: "sha256:169d93fd8f2f9009db3b9d5ccd37c2b753d0989e1e7cd8fe79f9160c459eef4f" useDigest: true pullPolicy: "IfNotPresent" # -- Seconds after which the completed job pod will be deleted @@ -992,23 +1027,17 @@ certgen: # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- Additional certgen volumes. extraVolumes: [] - # -- Additional certgen volumeMounts. extraVolumeMounts: [] - # -- Affinity for certgen affinity: {} - hubble: # -- Enable Hubble (true by default). enabled: true - # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble) annotations: {} - # -- Buffer size of the channel Hubble uses to receive monitor events. If this # value is not set, the queue size is set to the default monitor queue size. # eventQueueSize: "" @@ -1023,6 +1052,9 @@ hubble: # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics # for more comprehensive documentation about Hubble metrics. metrics: + # @schema + # type: [null, array] + # @schema # -- Configures the list of metrics to collect. If empty or null, metrics # are disabled. # Example: @@ -1044,6 +1076,32 @@ hubble: enableOpenMetrics: false # -- Configure the port the hubble metric server listens on. port: 9965 + tls: + # Enable hubble metrics server TLS. + enabled: false + # Configure hubble metrics server TLS. + server: + # -- base64 encoded PEM values for the Hubble metrics server certificate. + cert: "" + # -- base64 encoded PEM values for the Hubble metrics server key. 
+ key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- Configure mTLS for the Hubble metrics server. + mtls: + # When set to true enforces mutual TLS between Hubble Metrics server and its clients. + # False allow non-mutual TLS connections. + # This option has no effect when TLS is disabled. + enabled: false + useSecret: false + # -- Name of the ConfigMap containing the CA to validate client certificates against. + # If mTLS is enabled and this is unspecified, it will default to the + # same CA used for Hubble metrics server certificates. + name: ~ + # -- Entry of the ConfigMap containing the CA. + key: ca.crt # -- Annotations to be added to hubble-metrics service. serviceAnnotations: {} serviceMonitor: @@ -1065,21 +1123,29 @@ hubble: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor hubble metricRelabelings: ~ + # Configure TLS for the ServiceMonitor. + # Note, when using TLS you will either need to specify + # tlsConfig.insecureSkipVerify or specify a CA to use. + tlsConfig: {} # -- Grafana dashboards for hubble # grafana can import dashboards based on the label and value # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards dashboards: enabled: false label: grafana_dashboard + # @schema + # type: [null, string] + # @schema namespace: ~ labelValue: "1" annotations: {} - # -- Unix domain socket path to listen to when Hubble is enabled. socketPath: /var/run/cilium/hubble.sock - # -- Enables redacting sensitive information present in Layer 7 flows. redact: enabled: false @@ -1154,17 +1220,18 @@ hubble: # --set hubble.redact.enabled="true" # --set hubble.redact.kafka.apiKey="true" apiKey: false - # -- An additional address for Hubble to listen to. 
# Set this field ":4244" if you are enabling Hubble Relay, as it assumes that # Hubble is listening on port 4244. listenAddress: ":4244" # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. preferIpv6: false + # @schema + # type: [null, boolean] + # @schema # -- (bool) Skip Hubble events with unknown cgroup ids # @default -- `true` skipUnknownCGroupIDs: ~ - peerService: # -- Service Port for the Peer service. # If not set, it is dynamically assigned to port 443 if TLS is enabled and to @@ -1207,7 +1274,6 @@ hubble: # Defaults to midnight of the first day of every fourth month. For syntax, see # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax schedule: "0 0 1 */4 *" - # [Example] # certManagerIssuerRef: # group: cert-manager.io @@ -1215,7 +1281,6 @@ hubble: # name: ca-issuer # -- certmanager issuer used when hubble.tls.auto.method=certmanager. certManagerIssuerRef: {} - # -- base64 encoded PEM values for the Hubble server certificate and private key server: cert: "" @@ -1224,99 +1289,91 @@ hubble: extraDnsNames: [] # -- Extra IP addresses added to certificate when it's auto generated extraIpAddresses: [] - relay: # -- Enable Hubble Relay (requires hubble.enabled=true) enabled: false - # -- Roll out Hubble Relay pods automatically when configmap is updated. rollOutPods: false - # -- Hubble-relay container image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/hubble-relay" - tag: "v1.15.6" - # hubble-relay-digest - digest: "sha256:a0863dd70d081b273b87b9b7ce7e2d3f99171c2f5e202cd57bc6691e51283e0c" + tag: "v1.16.0" + # hubble-relay-digest + digest: "sha256:33fca7776fc3d7b2abe08873319353806dc1c5e07e12011d7da4da05f836ce8d" useDigest: true pullPolicy: "IfNotPresent" - # -- Specifies the resources for the hubble-relay pods resources: {} - # -- Number of replicas run for the hubble-relay deployment. 
replicas: 1 - # -- Affinity for hubble-replay affinity: podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium # -- Pod topology spread constraints for hubble-relay topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- Additional hubble-relay environment variables. extraEnv: [] - # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay) annotations: {} - # -- Annotations to be added to hubble-relay pods podAnnotations: {} - # -- Labels to be added to hubble-relay pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- The priority class to use for hubble-relay priorityClassName: "" - # -- Configure termination grace period for hubble relay Deployment. 
terminationGracePeriodSeconds: 1 - # -- hubble-relay update strategy updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 1 - # -- Additional hubble-relay volumes. extraVolumes: [] - # -- Additional hubble-relay volumeMounts. extraVolumeMounts: [] - # -- hubble-relay pod security context podSecurityContext: fsGroup: 65532 - # -- hubble-relay container security context securityContext: # readOnlyRootFilesystem: true @@ -1325,21 +1382,17 @@ hubble: runAsGroup: 65532 capabilities: drop: - - ALL - + - ALL # -- hubble-relay service configuration. service: # --- The type of service used for Hubble Relay access, either ClusterIP or NodePort. type: ClusterIP # --- The port to use when the service type is set to NodePort. nodePort: 31234 - # -- Host to listen to. Specify an empty string to bind to all the interfaces. listenHost: "" - # -- Port to listen to. listenPort: "4245" - # -- TLS configuration for Hubble Relay tls: # -- base64 encoded PEM values for the hubble-relay client certificate and private key @@ -1366,26 +1419,33 @@ hubble: # -- extra IP addresses added to certificate when its auto gen extraIpAddresses: [] # DNS name used by the backend to connect to the relay - # This is a simple workaround as the relay certificates are currently hardcoded to - # *.hubble-relay.cilium.io + # This is a simple workaround as the relay certificates are currently hardcoded to + # *.hubble-relay.cilium.io # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546 # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local relayName: "ui.hubble-relay.cilium.io" - + # @schema + # type: [null, string] + # @schema # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). dialTimeout: ~ - + # @schema + # type: [null, string] + # @schema # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s"). 
retryTimeout: ~ - - # -- Max number of flows that can be buffered for sorting before being sent to the + # @schema + # type: [null, integer] + # @schema + # -- (int) Max number of flows that can be buffered for sorting before being sent to the # client (per request) (e.g. 100). sortBufferLenMax: ~ - + # @schema + # type: [null, string] + # @schema # -- When the per-request flows sort buffer is not full, a flow is drained every # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s"). sortBufferDrainTimeout: ~ - # -- Port to use for the k8s service backed by hubble-relay pods. # If not set, it is dynamically assigned to port 443 if TLS is enabled and to # port 80 if not. @@ -1409,17 +1469,21 @@ hubble: # -- Specify the Kubernetes namespace where Prometheus expects to find # service monitors configured. # namespace: "" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor hubble-relay relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor hubble-relay metricRelabelings: ~ - gops: # -- Enable gops for hubble-relay enabled: true # -- Configure gops listen port for hubble-relay port: 9893 - pprof: # -- Enable pprof for hubble-relay enabled: false @@ -1427,38 +1491,33 @@ hubble: address: localhost # -- Configure pprof listen port for hubble-relay port: 6062 - ui: # -- Whether to enable the Hubble UI. enabled: false - standalone: # -- When true, it will allow installing the Hubble UI only, without checking dependencies. # It is useful if a cluster already has cilium and Hubble relay installed and you just # want Hubble UI to be deployed. 
# When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` enabled: false - tls: # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required # to provide a volume for mounting the client certificates. certsVolume: {} - # projected: - # defaultMode: 0400 - # sources: - # - secret: - # name: hubble-ui-client-certs - # items: - # - key: tls.crt - # path: client.crt - # - key: tls.key - # path: client.key - # - key: ca.crt - # path: hubble-relay-ca.crt - + # projected: + # defaultMode: 0400 + # sources: + # - secret: + # name: hubble-ui-client-certs + # items: + # - key: tls.crt + # path: client.crt + # - key: tls.key + # path: client.key + # - key: ca.crt + # path: hubble-relay-ca.crt # -- Roll out Hubble-ui pods automatically when configmap is updated. rollOutPods: false - tls: # -- base64 encoded PEM values used to connect to hubble-relay # This keypair is presented to Hubble Relay instances for mTLS @@ -1467,37 +1526,32 @@ hubble: client: cert: "" key: "" - backend: # -- Hubble-ui backend image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/hubble-ui-backend" - tag: "v0.13.0" - digest: "sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803" + tag: "v0.13.1" + digest: "sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b" useDigest: true pullPolicy: "IfNotPresent" - # -- Hubble-ui backend security context. securityContext: {} - # -- Additional hubble-ui backend environment variables. extraEnv: [] - # -- Additional hubble-ui backend volumes. extraVolumes: [] - # -- Additional hubble-ui backend volumeMounts. 
extraVolumeMounts: [] - livenessProbe: # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) enabled: false - readinessProbe: # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) enabled: false - # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. resources: {} # limits: @@ -1506,29 +1560,26 @@ hubble: # requests: # cpu: 100m # memory: 64Mi - frontend: # -- Hubble-ui frontend image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/hubble-ui" - tag: "v0.13.0" - digest: "sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666" + tag: "v0.13.1" + digest: "sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6" useDigest: true pullPolicy: "IfNotPresent" - # -- Hubble-ui frontend security context. securityContext: {} - # -- Additional hubble-ui frontend environment variables. extraEnv: [] - # -- Additional hubble-ui frontend volumes. extraVolumes: [] - # -- Additional hubble-ui frontend volumeMounts. extraVolumeMounts: [] - # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. resources: {} # limits: @@ -1541,63 +1592,60 @@ hubble: # -- Controls server listener for ipv6 ipv6: enabled: true - # -- The number of replicas of Hubble UI to deploy. replicas: 1 - # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui) annotations: {} - # -- Annotations to be added to hubble-ui pods podAnnotations: {} - # -- Labels to be added to hubble-ui pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. 
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- Affinity for hubble-ui affinity: {} - # -- Pod topology spread constraints for hubble-ui topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- The priority class to use for hubble-ui priorityClassName: "" - # -- hubble-ui update strategy. updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 1 - # -- Security context to be added to Hubble UI pods securityContext: runAsUser: 1001 runAsGroup: 1001 fsGroup: 1001 - # -- hubble-ui service configuration. service: # -- Annotations to be added for the Hubble UI service @@ -1606,18 +1654,16 @@ hubble: type: ClusterIP # --- The port to use when the service type is set to NodePort. nodePort: 31235 - # -- Defines base url prefix for all hubble-ui http requests. # It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. # Trailing `/` is required for custom path, ex. `/service-map/` baseUrl: "/" - # -- hubble-ui ingress configuration. 
ingress: enabled: false annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" className: "" hosts: - chart-example.local @@ -1626,7 +1672,6 @@ hubble: # - secretName: chart-example-tls # hosts: # - chart-example.local - # -- Hubble flows export. export: # --- Defines max file size of output file before it gets rotated. @@ -1660,35 +1705,42 @@ hubble: createConfigMap: true # ---- Exporters configuration in YAML format. content: - - name: all - fieldMask: [] - includeFilters: [] - excludeFilters: [] - filePath: "/var/run/cilium/hubble/events.log" - #- name: "test002" - # filePath: "/var/log/network/flow-log/pa/test002.log" - # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] - # includeFilters: - # - source_pod: ["default/"] - # event_type: - # - type: 1 - # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] - # excludeFilters: [] - # end: "2023-10-09T23:59:59-07:00" - + - name: all + fieldMask: [] + includeFilters: [] + excludeFilters: [] + filePath: "/var/run/cilium/hubble/events.log" + # - name: "test002" + # filePath: "/var/log/network/flow-log/pa/test002.log" + # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] + # includeFilters: + # - source_pod: ["default/"] + # event_type: + # - type: 1 + # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] + # excludeFilters: [] + # end: "2023-10-09T23:59:59-07:00" + # -- Emit v1.Events related to pods on detection of packet drops. + # This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. + dropEventEmitter: + enabled: false + # --- Minimum time between emitting same events. + interval: 2m + # --- Drop reasons to emit events for. 
+ # ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason + reasons: + - auth_required + - policy_denied # -- Method to use for identity allocation (`crd` or `kvstore`). identityAllocationMode: "crd" - # -- (string) Time to wait before using new identity on endpoint identity change. # @default -- `"5s"` identityChangeGracePeriod: "" - # -- Install Iptables rules to skip netfilter connection tracking on all pod # traffic. This option is only effective when Cilium is running in direct # routing and full KPR mode. Moreover, this option cannot be enabled when Cilium # is running in a managed Kubernetes environment or in a chained CNI setup. installNoConntrackIptablesRules: false - ipam: # -- Configure IP Address Management mode. # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ @@ -1696,75 +1748,85 @@ ipam: # -- Maximum rate at which the CiliumNode custom resource is updated. ciliumNodeUpdateRate: "15s" operator: + # @schema + # type: [array, string] + # @schema # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"] # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. clusterPoolIPv4MaskSize: 24 + # @schema + # type: [array, string] + # @schema # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. clusterPoolIPv6PodCIDRList: ["fd00::/104"] # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. clusterPoolIPv6MaskSize: 120 # -- IP pools to auto-create in multi-pool IPAM mode. autoCreateCiliumPodIPPools: {} - # default: - # ipv4: - # cidrs: - # - 10.10.0.0/8 - # maskSize: 24 - # other: - # ipv6: - # cidrs: - # - fd00:100::/80 - # maskSize: 96 - # -- The maximum burst size when rate limiting access to external APIs. 
+ # default: + # ipv4: + # cidrs: + # - 10.10.0.0/8 + # maskSize: 24 + # other: + # ipv6: + # cidrs: + # - fd00:100::/80 + # maskSize: 96 + # @schema + # type: [null, integer] + # @schema + # -- (int) The maximum burst size when rate limiting access to external APIs. # Also known as the token bucket capacity. # @default -- `20` externalAPILimitBurstSize: ~ - # -- The maximum queries per second when rate limiting access to + # @schema + # type: [null, number] + # @schema + # -- (float) The maximum queries per second when rate limiting access to # external APIs. Also known as the bucket refill rate, which is used to # refill the bucket up to the burst size capacity. # @default -- `4.0` externalAPILimitQPS: ~ - +nodeIPAM: + # -- Configure Node IPAM + # ref: https://docs.cilium.io/en/stable/network/node-ipam/ + enabled: false +# @schema +# type: [null, string] +# @schema # -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API apiRateLimit: ~ - # -- Configure the eBPF-based ip-masq-agent ipMasqAgent: enabled: false # the config of nonMasqueradeCIDRs # config: - # nonMasqueradeCIDRs: [] - # masqLinkLocal: false - # masqLinkLocalIPv6: false +# nonMasqueradeCIDRs: [] +# masqLinkLocal: false +# masqLinkLocalIPv6: false # iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. # iptablesLockTimeout: "5s" - ipv4: # -- Enable IPv4 support. enabled: true - ipv6: # -- Enable IPv6 support. 
enabled: false - # -- Configure Kubernetes specific configuration -k8s: {} +k8s: # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR # range via the Kubernetes node resource - # requireIPv4PodCIDR: false - + requireIPv4PodCIDR: false # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR # range via the Kubernetes node resource - # requireIPv6PodCIDR: false - + requireIPv6PodCIDR: false # -- Keep the deprecated selector labels when deploying Cilium DaemonSet. keepDeprecatedLabels: false - # -- Keep the deprecated probes when deploying Cilium DaemonSet keepDeprecatedProbes: false - startupProbe: # -- failure threshold of startup probe. # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) @@ -1781,9 +1843,8 @@ readinessProbe: failureThreshold: 3 # -- interval between checks of the readiness probe periodSeconds: 30 - # -- Configure the kube-proxy replacement in Cilium BPF datapath -# Valid options are "true", "false", "disabled" (deprecated), "partial" (deprecated), "strict" (deprecated). +# Valid options are "true" or "false". # ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ #kubeProxyReplacement: "false" @@ -1792,19 +1853,15 @@ readinessProbe: # addresses and this '[::]:10256' for all ipv6 addresses. # By default it is disabled. kubeProxyReplacementHealthzBindAddr: "" - l2NeighDiscovery: # -- Enable L2 neighbor discovery in the agent enabled: true # -- Override the agent's default neighbor resolution refresh period. refreshPeriod: "30s" - # -- Enable Layer 7 network policy. l7Proxy: true - # -- Enable Local Redirect Policy. 
localRedirectPolicy: false - # To include or exclude matched resources from cilium identity evaluation # labels: "" @@ -1814,56 +1871,45 @@ localRedirectPolicy: false # -- Enables periodic logging of system load logSystemLoad: false - # -- Configure maglev consistent hashing maglev: {} - # -- tableSize is the size (parameter M) for the backend table of one - # service entry - # tableSize: +# -- tableSize is the size (parameter M) for the backend table of one +# service entry +# tableSize: - # -- hashSeed is the cluster-wide base64 encoded seed for the hashing - # hashSeed: +# -- hashSeed is the cluster-wide base64 encoded seed for the hashing +# hashSeed: # -- Enables masquerading of IPv4 traffic leaving the node from endpoints. enableIPv4Masquerade: true - # -- Enables masquerading of IPv6 traffic leaving the node from endpoints. enableIPv6Masquerade: true - # -- Enables masquerading to the source of the route for traffic leaving the node from endpoints. enableMasqueradeRouteSource: false - # -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods enableIPv4BIGTCP: false - # -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods enableIPv6BIGTCP: false - egressGateway: # -- Enables egress gateway to redirect and SNAT the traffic that leaves the # cluster. enabled: false - # -- Deprecated without a replacement necessary. - installRoutes: false # -- Time between triggers of egress gateway state reconciliations reconciliationTriggerInterval: 1s # -- Maximum number of entries in egress gateway policy map # maxPolicyEntries: 16384 - vtep: -# -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow -# Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. + # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow + # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. 
enabled: false - -# -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" + # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" endpoint: "" -# -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" + # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" cidr: "" -# -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" + # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" mask: "" -# -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" + # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" mac: "" - # -- (string) Allows to explicitly specify the IPv4 CIDR for native routing. # When specified, Cilium assumes networking for this CIDR is preconfigured and # hands traffic destined for that range to the Linux network stack without @@ -1875,7 +1921,6 @@ vtep: # the user must configure the routes to reach pods, either manually or by # setting the auto-direct-node-routes flag. ipv4NativeRoutingCIDR: "" - # -- (string) Allows to explicitly specify the IPv6 CIDR for native routing. # When specified, Cilium assumes networking for this CIDR is preconfigured and # hands traffic destined for that range to the Linux network stack without @@ -1887,12 +1932,10 @@ ipv4NativeRoutingCIDR: "" # the user must configure the routes to reach pods, either manually or by # setting the auto-direct-node-routes flag. ipv6NativeRoutingCIDR: "" - # -- cilium-monitor sidecar. monitor: # -- Enable the cilium-monitor sidecar. enabled: false - # -- Configure service load balancing loadBalancer: # -- standalone enables the standalone L4LB which does not connect to @@ -1913,7 +1956,6 @@ loadBalancer: # path), or best-effort (use native mode XDP acceleration on devices # that support it). 
acceleration: disabled - # -- dsrDispatch configures whether IP option or IPIP encapsulation is # used to pass a service IP and port to remote backend # dsrDispatch: opt @@ -1942,40 +1984,46 @@ loadBalancer: # service annotation (e.g. service.cilium.io/lb-l7-algorithm) # Applicable values: round_robin, least_request, random algorithm: round_robin - # -- Configure N-S k8s service loadbalancing nodePort: # -- Enable the Cilium NodePort service implementation. enabled: false - # -- Port range to use for NodePort services. # range: "30000,32767" + # @schema + # type: [null, string, array] + # @schema + # -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. + # By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. + # + # Example: + # + # addresses: ["192.168.1.0/24", "2001::/64"] + # + addresses: ~ # -- Set to true to prevent applications binding to service ports. bindProtection: true - # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral # ports is detected. autoProtectPortRange: true - # -- Enable healthcheck nodePort server for NodePort services enableHealthCheck: true - # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs # EnableHealthCheck to be enabled enableHealthCheckLoadBalancerIP: false - # policyAuditMode: false # -- The agent can be put into one of the three policy enforcement modes: # default, always and never. # ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes policyEnforcementMode: "default" - +# @schema +# type: [null, string, array] +# @schema # -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. # The possible value is "nodes". 
policyCIDRMatchMode: - pprof: # -- Enable pprof for cilium-agent enabled: false @@ -1983,7 +2031,6 @@ pprof: address: localhost # -- Configure pprof listen port for cilium-agent port: 6060 - # -- Configure prometheus metrics on the configured port at /metrics prometheus: enabled: false @@ -2009,17 +2056,21 @@ prometheus: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor cilium-agent metricRelabelings: ~ # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying trustCRDsExist: false - + # @schema + # type: [null, array] + # @schema # -- Metrics that should be enabled or disabled from the default metric list. # The list is expected to be separated by a space. (+metric_foo to enable # metric_foo , -metric_bar to disable metric_bar). # ref: https://docs.cilium.io/en/stable/observability/metrics/ metrics: ~ - # --- Enable controller group metrics for monitoring specific Cilium # subsystems. The list is a list of controller group names. The special # values of "all" and "none" are supported. The set of controller @@ -2028,40 +2079,36 @@ prometheus: - write-cni-file - sync-host-ips - sync-lb-maps-with-k8s-services - # -- Grafana dashboards for cilium-agent # grafana can import dashboards based on the label and value # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards dashboards: enabled: false label: grafana_dashboard + # @schema + # type: [null, string] + # @schema namespace: ~ labelValue: "1" annotations: {} - -# -- Configure Istio proxy options. -proxy: - - prometheus: - # -- Deprecated in favor of envoy.prometheus.enabled - enabled: true - # -- Deprecated in favor of envoy.prometheus.port - port: ~ - # -- Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecarImageRegex: "cilium/istio_proxy" - # Configure Cilium Envoy options. 
envoy: + # @schema + # type: [null, boolean] + # @schema # -- Enable Envoy Proxy in standalone DaemonSet. - enabled: false - + # This field is enabled by default for new installation. + # @default -- `true` for new installation + enabled: ~ + # -- (int) + # Set Envoy'--base-id' to use when allocating shared memory regions. + # Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0' + baseID: 0 log: # -- The format string to use for laying out the log message metadata of Envoy. format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v" # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout. path: "" - # -- Time in seconds after which a TCP connection attempt times out connectTimeoutSeconds: 2 # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy @@ -2075,58 +2122,53 @@ envoy: xffNumTrustedHopsL7PolicyIngress: 0 # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. xffNumTrustedHopsL7PolicyEgress: 0 - # -- Envoy container image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/cilium-envoy" - tag: "v1.28.4-b35188ffa1bbe54d1720d2e392779f7a48e58f6b" + tag: "v1.29.7-39a2a56bbd5b3a591f69dbca51d3e30ef97e0e51" pullPolicy: "IfNotPresent" - digest: "sha256:b528b291561e459024f66414ac3325b88cdd8f9f4854828a155a11e5b10b78a3" + digest: "sha256:bd5ff8c66716080028f414ec1cb4f7dc66f40d2fb5a009fff187f4a9b90b566b" useDigest: true - # -- Additional containers added to the cilium Envoy DaemonSet. extraContainers: [] - # -- Additional envoy container arguments. extraArgs: [] - # -- Additional envoy container environment variables. extraEnv: [] - # -- Additional envoy hostPath mounts. 
extraHostPathMounts: [] - # - name: host-mnt-data - # mountPath: /host/mnt/data - # hostPath: /mnt/data - # hostPathType: Directory - # readOnly: true - # mountPropagation: HostToContainer + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer # -- Additional envoy volumes. extraVolumes: [] - # -- Additional envoy volumeMounts. extraVolumeMounts: [] - # -- Configure termination grace period for cilium-envoy DaemonSet. terminationGracePeriodSeconds: 1 - # -- TCP port for the health API. healthPort: 9878 - # -- cilium-envoy update strategy # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 2 # -- Roll out cilium envoy pods automatically when configmap is updated. rollOutPods: false - # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) annotations: {} - # -- Security Context for cilium-envoy pods. podSecurityContext: # -- AppArmorProfile options for the `cilium-agent` and init containers @@ -2134,19 +2176,17 @@ envoy: type: "Unconfined" # -- Annotations to be added to envoy pods podAnnotations: {} - # -- Labels to be added to envoy pods podLabels: {} - # -- Envoy resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi startupProbe: # -- failure threshold of startup probe. 
@@ -2164,7 +2204,6 @@ envoy: failureThreshold: 3 # -- interval between checks of the readiness probe periodSeconds: 30 - securityContext: # -- User to run the pod with # runAsUser: 0 @@ -2178,7 +2217,13 @@ envoy: # type available on the system. type: 'spc_t' capabilities: - # -- Capabilities for the `cilium-envoy` container + # -- Capabilities for the `cilium-envoy` container. + # Even though granted to the container, the cilium-envoy-starter wrapper drops + # all capabilities after forking the actual Envoy process. + # `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by + # setting `envoy.securityContext.capabilities.keepNetBindService=true` (in addition to granting the + # capability to the container). + # Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. envoy: # Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT - NET_ADMIN @@ -2191,49 +2236,60 @@ envoy: # If available, SYS_ADMIN can be removed. #- PERFMON #- BPF - + # -- Keep capability `NET_BIND_SERVICE` for Envoy process. + keepCapNetBindService: false # -- Affinity for cilium-envoy. affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium-envoy + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium-envoy podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: cilium.io/no-schedule - operator: NotIn - values: - - "true" + - key: cilium.io/no-schedule + operator: NotIn + values: + - "true" # -- Node selector for cilium-envoy. 
nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for envoy scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # @schema + # type: [null, string] + # @schema # -- The priority class to use for cilium-envoy. priorityClassName: ~ - + # @schema + # type: [null, string] + # @schema # -- DNS policy for Cilium envoy pods. # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: ~ - + debug: + admin: + # -- Enable admin interface for cilium-envoy. + # This is useful for debugging and should not be enabled in production. + enabled: false + # -- Port number (bound to loopback interface). + # kubectl port-forward can be used to access the admin interface. + port: 9901 # -- Configure Cilium Envoy Prometheus options. # Note that some of these apply to either cilium-agent or cilium-envoy. prometheus: @@ -2261,17 +2317,16 @@ envoy: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy # or for cilium-agent with Envoy configured. metricRelabelings: ~ # -- Serve prometheus metrics for cilium-envoy on the configured port port: "9964" - -# -- Enable use of the remote node identity. -# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity -# Deprecated without replacement in 1.15. To be removed in 1.16. -remoteNodeIdentity: true - +# -- Enable/Disable use of node label based identity +nodeSelectorLabels: false # -- Enable resource quotas for priority classes used in the cluster. 
resourceQuotas: enabled: false @@ -2283,7 +2338,6 @@ resourceQuotas: hard: # 15 "clusterwide" Cilium Operator pods for HA pods: "15" - # Need to document default ################## #sessionAffinity: false @@ -2292,13 +2346,10 @@ resourceQuotas: # uninstall Cilium as it will stop Cilium from starting and create artifacts # in the node. sleepAfterInit: false - # -- Enable check of service source ranges (currently, only for LoadBalancer). svcSourceRangeCheck: true - # -- Synchronize Kubernetes nodes to kvstore and perform CNP GC. synchronizeK8sNodes: true - # -- Configure TLS configuration in the agent. tls: # -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies @@ -2307,7 +2358,6 @@ tls: # - local # - k8s secretsBackend: local - # -- Base64 encoded PEM values for the CA certificate and private key. # This can be used as common CA to generate certificates used by hubble and clustermesh components. # It is neither required nor used when cert-manager is used to generate the certificates. @@ -2315,30 +2365,23 @@ tls: # -- Optional CA cert. If it is provided, it will be used by cilium to # generate all other certificates. Otherwise, an ephemeral CA is generated. cert: "" - # -- Optional CA private key. If it is provided, it will be used by cilium to # generate all other certificates. Otherwise, an ephemeral CA is generated. key: "" - # -- Generated certificates validity duration in days. This will be used for auto generated CA. certValidityDuration: 1095 - # -- Configure the CA trust bundle used for the validation of the certificates # leveraged by hubble and clustermesh. When enabled, it overrides the content of the # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time. caBundle: # -- Enable the use of the CA trust bundle. enabled: false - # -- Name of the ConfigMap containing the CA trust bundle. name: cilium-root-ca.crt - # -- Entry of the ConfigMap containing the CA trust bundle. 
key: ca.crt - # -- Use a Secret instead of a ConfigMap. useSecret: false - # If uncommented, creates the ConfigMap and fills it with the specified content. # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace. # @@ -2349,7 +2392,6 @@ tls: # -----BEGIN CERTIFICATE----- # ... # -----END CERTIFICATE----- - # -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. # Possible values: # - "" @@ -2357,7 +2399,6 @@ tls: # - geneve # @default -- `"vxlan"` tunnelProtocol: "" - # -- Enable native-routing mode or tunneling mode. # Possible values: # - "" @@ -2365,276 +2406,167 @@ tunnelProtocol: "" # - tunnel # @default -- `"tunnel"` routingMode: "" - # -- Configure VXLAN and Geneve tunnel port. # @default -- Port 8472 for VXLAN, Port 6081 for Geneve tunnelPort: 0 - # -- Configure what the response should be to traffic for a service without backends. # "reject" only works on kernels >= 5.10, on lower kernels we fallback to "drop". # Possible values: # - reject (default) # - drop serviceNoBackendResponse: reject - # -- Configure the underlying network MTU to overwrite auto-detected MTU. +# This value doesn't change the host network interface MTU i.e. eth0 or ens0. +# It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, +# cilium_vxlan and lxc_health interfaces. MTU: 0 - # -- Disable the usage of CiliumEndpoint CRD. disableEndpointCRD: false - wellKnownIdentities: # -- Enable the use of well-known identities. enabled: false - etcd: # -- Enable etcd mode for the agent. enabled: false - - # -- cilium-etcd-operator image. - image: - override: ~ - repository: "quay.io/cilium/cilium-etcd-operator" - tag: "v2.0.7" - digest: "sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc" - useDigest: true - pullPolicy: "IfNotPresent" - - # -- The priority class to use for cilium-etcd-operator - priorityClassName: "" - - # -- Additional cilium-etcd-operator container arguments. 
- extraArgs: [] - - # -- Additional cilium-etcd-operator volumes. - extraVolumes: [] - - # -- Additional cilium-etcd-operator volumeMounts. - extraVolumeMounts: [] - - # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints - # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - # -- Pod topology spread constraints for cilium-etcd-operator - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Node labels for cilium-etcd-operator pod assignment - # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector - nodeSelector: - kubernetes.io/os: linux - - # -- Annotations to be added to all top-level etcd-operator objects (resources under templates/etcd-operator) - annotations: {} - - # -- Security context to be added to cilium-etcd-operator pods - podSecurityContext: {} - - # -- Annotations to be added to cilium-etcd-operator pods - podAnnotations: {} - - # -- Labels to be added to cilium-etcd-operator pods - podLabels: {} - - # PodDisruptionBudget settings - podDisruptionBudget: - # -- enable PodDisruptionBudget - # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - enabled: false - # -- Minimum number/percentage of pods that should remain scheduled. 
- # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` - minAvailable: null - # -- Maximum number/percentage of pods that may be made unavailable - maxUnavailable: 1 - - # -- cilium-etcd-operator resource limits & requests - # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi - - # -- Security context to be added to cilium-etcd-operator pods - securityContext: {} - # runAsUser: 0 - - # -- cilium-etcd-operator update strategy - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - - # -- If etcd is behind a k8s service set this option to true so that Cilium - # does the service translation automatically without requiring a DNS to be - # running. - k8sService: false - - # -- Cluster domain for cilium-etcd-operator. - clusterDomain: cluster.local - - # -- List of etcd endpoints (not needed when using managed=true). + # -- List of etcd endpoints endpoints: - https://CHANGE-ME:2379 - - # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if - # managed=true) + # -- Enable use of TLS/SSL for connectivity to etcd. ssl: false - operator: # -- Enable the cilium-operator component (required). enabled: true - # -- Roll out cilium-operator pods automatically when configmap is updated. rollOutPods: false - # -- cilium-operator image. 
image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/operator" - tag: "v1.15.6" + tag: "v1.16.0" # operator-generic-digest - genericDigest: "sha256:5789f0935eef96ad571e4f5565a8800d3a8fbb05265cf6909300cd82fd513c3d" + genericDigest: "sha256:d6621c11c4e4943bf2998af7febe05be5ed6fdcf812b27ad4388f47022190316" # operator-azure-digest - azureDigest: "sha256:386456c055c5d1380daf966d565fcafaed68467a4fe692679530764e3b56f170" + azureDigest: "sha256:dd7562e20bc72b55c65e2110eb98dca1dd2bbf6688b7d8cea2bc0453992c121d" # operator-aws-digest - awsDigest: "sha256:9656d44ee69817d156cc7d3797f92de2e534dfb991610c79c00e097b4dedd620" + awsDigest: "sha256:8dbe47a77ba8e1a5b111647a43db10c213d1c7dfc9f9aab5ef7279321ad21a2f" # operator-alibabacloud-digest - alibabacloudDigest: "sha256:7e1664bd18645b38fd41dc1c2decd334abeefe63d4d69bfbc65765806eb4a31f" + alibabacloudDigest: "sha256:d2d9f450f2fc650d74d4b3935f4c05736e61145b9c6927520ea52e1ebcf4f3ea" useDigest: true pullPolicy: "IfNotPresent" suffix: "" - # -- Number of replicas to run for the cilium-operator deployment replicas: 2 - # -- The priority class to use for cilium-operator priorityClassName: "" - # -- DNS policy for Cilium operator pods. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: "" - # -- cilium-operator update strategy updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxSurge: 25% + # @schema + # type: [integer, string] + # @schema maxUnavailable: 50% - # -- Affinity for cilium-operator affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - io.cilium/app: operator - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator # -- Pod topology spread constraints for cilium-operator topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for cilium-operator pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for cilium-operator scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- Additional cilium-operator container arguments. extraArgs: [] - # -- Additional cilium-operator environment variables. extraEnv: [] - # -- Additional cilium-operator hostPath mounts. 
extraHostPathMounts: [] - # - name: host-mnt-data - # mountPath: /host/mnt/data - # hostPath: /mnt/data - # hostPathType: Directory - # readOnly: true - # mountPropagation: HostToContainer + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer # -- Additional cilium-operator volumes. extraVolumes: [] - # -- Additional cilium-operator volumeMounts. extraVolumeMounts: [] - # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator) annotations: {} - + # -- HostNetwork setting + hostNetwork: true # -- Security context to be added to cilium-operator pods podSecurityContext: {} - # -- Annotations to be added to cilium-operator pods podAnnotations: {} - # -- Labels to be added to cilium-operator pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- cilium-operator resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 1000m - # memory: 1Gi - # requests: - # cpu: 100m - # memory: 128Mi + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi # -- Security context to be added to cilium-operator pods securityContext: {} - # runAsUser: 0 + # runAsUser: 0 # -- Interval for endpoint garbage collection. endpointGCInterval: "5m0s" - # -- Interval for cilium node garbage collection. 
nodeGCInterval: "5m0s" - - # -- Skip CNP node status clean up at operator startup. - skipCNPStatusStartupClean: false - # -- Interval for identity garbage collection. identityGCInterval: "15m0s" - # -- Timeout for identity heartbeats. identityHeartbeatTimeout: "30m0s" - pprof: # -- Enable pprof for cilium-operator enabled: false @@ -2642,7 +2574,6 @@ operator: address: localhost # -- Configure pprof listen port for cilium-operator port: 6061 - # -- Enable prometheus metrics for cilium-operator on the configured port at # /metrics prometheus: @@ -2660,96 +2591,92 @@ operator: jobLabel: "" # -- Interval for scrape metrics. interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor cilium-operator relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor cilium-operator metricRelabelings: ~ - # -- Grafana dashboards for cilium-operator # grafana can import dashboards based on the label and value # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards dashboards: enabled: false label: grafana_dashboard + # @schema + # type: [null, string] + # @schema namespace: ~ labelValue: "1" annotations: {} - # -- Skip CRDs creation for cilium-operator skipCRDCreation: false - # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium # pod running. removeNodeTaints: true - + # @schema + # type: [null, boolean] + # @schema # -- Taint nodes where Cilium is scheduled but not running. This prevents pods # from being scheduled to nodes where Cilium is not the default CNI provider. # @default -- same as removeNodeTaints setNodeTaints: ~ - # -- Set Node condition NetworkUnavailable to 'false' with the reason # 'CiliumIsUp' for nodes that have a healthy Cilium pod. setNodeNetworkStatus: true - unmanagedPodWatcher: # -- Restart any pod that are not managed by Cilium. 
restart: true # -- Interval, in seconds, to check if there are any pods that are not # managed by Cilium. intervalSeconds: 15 - nodeinit: # -- Enable the node initialization DaemonSet enabled: false - # -- node-init image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/startup-script" - tag: "19fb149fb3d5c7a37d3edfaf10a2be3ab7386661" - digest: "sha256:820155cb3b7f00c8d61c1cffa68c44440906cb046bdbad8ff544f5deb1103456" + tag: "c54c7edeab7fde4da68e59acd319ab24af242c3f" + digest: "sha256:8d7b41c4ca45860254b3c19e20210462ef89479bb6331d6760c4e609d651b29c" useDigest: true pullPolicy: "IfNotPresent" - # -- The priority class to use for the nodeinit pod. priorityClassName: "" - # -- node-init update strategy updateStrategy: type: RollingUpdate - # -- Additional nodeinit environment variables. extraEnv: [] - # -- Additional nodeinit volumes. extraVolumes: [] - # -- Additional nodeinit volumeMounts. extraVolumeMounts: [] - # -- Affinity for cilium-nodeinit affinity: {} - # -- Node labels for nodeinit pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for nodeinit scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit) annotations: {} - # -- Annotations to be added to node-init pods. podAnnotations: {} - # -- Labels to be added to node-init pods. podLabels: {} # -- Security Context for cilium-node-init pods. 
@@ -2763,7 +2690,6 @@ nodeinit: requests: cpu: 100m memory: 100Mi - # -- Security context to be added to nodeinit pods. securityContext: privileged: false @@ -2782,142 +2708,123 @@ nodeinit: - SYS_ADMIN - SYS_CHROOT - SYS_PTRACE - # -- bootstrapFile is the location of the file where the bootstrap timestamp is # written by the node-init DaemonSet bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time" - # -- startup offers way to customize startup nodeinit script (pre and post position) startup: - preScript: "" - postScript: "" + preScript: "" + postScript: "" # -- prestop offers way to customize prestop nodeinit script (pre and post position) prestop: - preScript: "" - postScript: "" - + preScript: "" + postScript: "" preflight: # -- Enable Cilium pre-flight resources (required for upgrade) enabled: false - # -- Cilium pre-flight image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/cilium" - tag: "v1.15.6" + tag: "v1.16.0" # cilium-digest - digest: "sha256:6aa840986a3a9722cd967ef63248d675a87add7e1704740902d5d3162f0c0def" + digest: "sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058" useDigest: true pullPolicy: "IfNotPresent" - # -- The priority class to use for the preflight pod. priorityClassName: "" - # -- preflight update strategy updateStrategy: type: RollingUpdate - # -- Additional preflight environment variables. extraEnv: [] - # -- Additional preflight volumes. extraVolumes: [] - # -- Additional preflight volumeMounts. 
extraVolumeMounts: [] - # -- Affinity for cilium-preflight affinity: podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium # -- Node labels for preflight pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for preflight scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - key: node.kubernetes.io/not-ready - effect: NoSchedule - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - - key: node.cloudprovider.kubernetes.io/uninitialized - effect: NoSchedule - value: "true" - - key: CriticalAddonsOnly - operator: "Exists" - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight) annotations: {} - # -- Security context to be added to preflight pods. podSecurityContext: {} - # -- Annotations to be added to preflight pods podAnnotations: {} - # -- Labels to be added to the preflight pod. podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. 
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- preflight resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + readinessProbe: + # -- For how long kubelet should wait before performing the first probe + initialDelaySeconds: 5 + # -- interval between checks of the readiness probe + periodSeconds: 5 # -- Security context to be added to preflight pods securityContext: {} - # runAsUser: 0 + # runAsUser: 0 # -- Path to write the `--tofqdns-pre-cache` file to. tofqdnsPreCache: "" - # -- Configure termination grace period for preflight Deployment and DaemonSet. terminationGracePeriodSeconds: 1 - # -- By default we should always validate the installed CNPs before upgrading # Cilium. This will make sure the user will have the policies deployed in the # cluster with the right schema. validateCNPs: true - # -- Explicitly enable or disable priority class. # .Capabilities.KubeVersion is unsettable in `helm template` calls, # it depends on k8s libraries version that Helm was compiled against. # This option allows to explicitly disable setting the priority class, which # is useful for rendering charts for gke clusters in advance. enableCriticalPriorityClass: true - # disableEnvoyVersionCheck removes the check for Envoy, which can be useful # on AArch64 as the images do not currently ship a version of Envoy. #disableEnvoyVersionCheck: false - clustermesh: # -- Deploy clustermesh-apiserver for clustermesh useAPIServer: false @@ -2927,10 +2834,13 @@ clustermesh: # maximum allocatable cluster-local identities. 
# Supported values are 255 and 511. maxConnectedClusters: 255 - + # -- Enable the synchronization of Kubernetes EndpointSlices corresponding to + # the remote endpoints of appropriately-annotated global services through ClusterMesh + enableEndpointSliceSynchronization: false + # -- Enable Multi-Cluster Services API support + enableMCSAPISupport: false # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config) annotations: {} - # -- Clustermesh explicit configuration. config: # -- Enable the Clustermesh explicit configuration. @@ -2960,18 +2870,23 @@ clustermesh: # cert: "" # key: "" # caCert: "" - apiserver: # -- Clustermesh API server image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "quay.io/cilium/clustermesh-apiserver" - tag: "v1.15.6" + tag: "v1.16.0" # clustermesh-apiserver-digest - digest: "sha256:6365c2fe8a038fc7adcdeb7ffb8d7a8a2cd3ee524687f35fff9df76fafeeb029" + digest: "sha256:a1597b7de97cfa03f1330e6b784df1721eb69494cd9efb0b3a6930680dfe7a8e" useDigest: true pullPolicy: "IfNotPresent" - + # -- TCP port for the clustermesh-apiserver health API. + healthPort: 9880 + # -- Configuration for the clustermesh-apiserver readiness probe. + readinessProbe: {} etcd: # The etcd binary is included in the clustermesh API server image, so the same image from above is reused. 
# Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is @@ -2987,11 +2902,13 @@ clustermesh: # memory: 256Mi # -- Security context to be added to clustermesh-apiserver etcd containers - securityContext: {} - + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL # -- lifecycle setting for the etcd container lifecycle: {} - init: # -- Specifies the resources for etcd init container in the apiserver resources: {} @@ -3004,43 +2921,48 @@ clustermesh: # -- Additional arguments to `clustermesh-apiserver etcdinit`. extraArgs: [] - # -- Additional environment variables to `clustermesh-apiserver etcdinit`. extraEnv: [] - + # @schema + # enum: [Disk, Memory] + # @schema + # -- Specifies whether etcd data is stored in a temporary volume backed by + # the node's default medium, such as disk, SSD or network storage (Disk), or + # RAM (Memory). The Memory option enables improved etcd read and write + # performance at the cost of additional memory usage, which counts against + # the memory limits of the container. + storageMedium: Disk kvstoremesh: # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved # from the remote clusters in the local etcd instance. - enabled: false - + enabled: true + # -- TCP port for the KVStoreMesh health API. + healthPort: 9881 + # -- Configuration for the KVStoreMesh readiness probe. + readinessProbe: {} # -- Additional KVStoreMesh arguments. extraArgs: [] - # -- Additional KVStoreMesh environment variables. extraEnv: [] - # -- Resource requests and limits for the KVStoreMesh container resources: {} - # requests: - # cpu: 100m - # memory: 64Mi - # limits: - # cpu: 1000m - # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M # -- Additional KVStoreMesh volumeMounts. 
extraVolumeMounts: [] - # -- KVStoreMesh Security context securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL - # -- lifecycle setting for the KVStoreMesh container lifecycle: {} - service: # -- The type of service used for apiserver access. type: NodePort @@ -3054,114 +2976,144 @@ clustermesh: # NodePort will be redirected to a local backend, regardless of whether the # destination node belongs to the local or the remote cluster. nodePort: 32379 - # -- Optional loadBalancer IP address to use with type LoadBalancer. - # loadBalancerIP: - # -- Annotations for the clustermesh-apiserver # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" - # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: "true" annotations: {} - + # @schema + # enum: [Local, Cluster] + # @schema # -- The externalTrafficPolicy of service used for apiserver access. - externalTrafficPolicy: - + externalTrafficPolicy: Cluster + # @schema + # enum: [Local, Cluster] + # @schema # -- The internalTrafficPolicy of service used for apiserver access. - internalTrafficPolicy: - + internalTrafficPolicy: Cluster + # @schema + # enum: [HAOnly, Always, Never] + # @schema + # -- Defines when to enable session affinity. + # Each replica in a clustermesh-apiserver deployment runs its own discrete + # etcd cluster. Remote clients connect to one of the replicas through a + # shared Kubernetes Service. A client reconnecting to a different backend + # will require a full resync to ensure data integrity. Session affinity + # can reduce the likelihood of this happening, but may not be supported + # by all cloud providers. + # Possible values: + # - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. + # - "Always" Always enable session affinity. 
+ # - "Never" Never enable session affinity. Useful in environments where + # session affinity is not supported, but may lead to slightly + # degraded performance due to more frequent reconnections. + enableSessionAffinity: "HAOnly" + # @schema + # type: [null, string] + # @schema + # -- Configure a loadBalancerClass. + # Allows to configure the loadBalancerClass on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer + # (requires Kubernetes 1.24+). + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP. + # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer. + loadBalancerIP: ~ # -- Number of replicas run for the clustermesh-apiserver deployment. replicas: 1 - # -- lifecycle setting for the apiserver container lifecycle: {} - # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment terminationGracePeriodSeconds: 30 - # -- Additional clustermesh-apiserver arguments. extraArgs: [] - # -- Additional clustermesh-apiserver environment variables. extraEnv: [] - # -- Additional clustermesh-apiserver volumes. extraVolumes: [] - # -- Additional clustermesh-apiserver volumeMounts. 
extraVolumeMounts: [] - # -- Security context to be added to clustermesh-apiserver containers - securityContext: {} - + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL # -- Security context to be added to clustermesh-apiserver pods - podSecurityContext: {} - + podSecurityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 # -- Annotations to be added to clustermesh-apiserver pods podAnnotations: {} - # -- Labels to be added to clustermesh-apiserver pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - - # -- Resource requests and limits for the clustermesh-apiserver container of the clustermesh-apiserver deployment, such as - # resources: - # limits: - # cpu: 1000m - # memory: 1024M - # requests: - # cpu: 100m - # memory: 64Mi # -- Resource requests and limits for the clustermesh-apiserver resources: {} - # requests: - # cpu: 100m - # memory: 64Mi - # limits: - # cpu: 1000m - # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M # -- Affinity for clustermesh.apiserver affinity: podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: clustermesh-apiserver - + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + k8s-app: clustermesh-apiserver + topologyKey: kubernetes.io/hostname # -- Pod topology 
spread constraints for clustermesh-apiserver topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- clustermesh-apiserver update strategy updateStrategy: type: RollingUpdate rollingUpdate: - maxUnavailable: 1 - + # @schema + # type: [integer, string] + # @schema + maxSurge: 1 + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 0 # -- The priority class to use for clustermesh-apiserver priorityClassName: "" - tls: # -- Configure the clustermesh authentication mode. # Supported values: @@ -3179,7 +3131,13 @@ clustermesh: # if provided manually. Cluster mode is meaningful only when the same # CA is shared across all clusters part of the mesh. authMode: legacy - + # -- Allow users to provide their own certificates + # Users may need to provide their certificates using + # a mechanism that requires they provide their own secrets. + # This setting does not apply to any of the auto-generated + # mechanisms below, it only restricts the creation of secrets + # via the `tls-provided` templates. + enableSecrets: true # -- Configure automatic TLS certificates generation. # A Kubernetes CronJob is used the generate any # certificates not provided by the user at installation @@ -3243,20 +3201,17 @@ clustermesh: remote: cert: "" key: "" - # clustermesh-apiserver Prometheus metrics configuration metrics: # -- Enables exporting apiserver metrics in OpenMetrics format. enabled: true # -- Configure the port the apiserver metric server listens on. 
port: 9962 - kvstoremesh: # -- Enables exporting KVStoreMesh metrics in OpenMetrics format. enabled: true - # -- Configure the port the KVStoreMesh metric server listens on. + # -- Configure the port the KVStoreMesh metric server listens on. port: 9964 - etcd: # -- Enables exporting etcd metrics in OpenMetrics format. enabled: true @@ -3264,7 +3219,6 @@ clustermesh: mode: basic # -- Configure the port the etcd metric server listens on. port: 9963 - serviceMonitor: # -- Enable service monitor. # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) @@ -3279,32 +3233,46 @@ clustermesh: # -- Interval for scrape metrics (apiserver metrics) interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) metricRelabelings: ~ - kvstoremesh: # -- Interval for scrape metrics (KVStoreMesh metrics) interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) metricRelabelings: ~ - etcd: # -- Interval for scrape metrics (etcd metrics) interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) metricRelabelings: ~ - # -- Configure external workloads support externalWorkloads: # -- Enable support for 
external workloads, such as VMs (false by default). enabled: false - # -- Configure cgroup related configuration cgroup: autoMount: @@ -3317,27 +3285,30 @@ cgroup: enabled: true # -- Init Container Cgroup Automount resource limits & requests resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) hostRoot: /run/cilium/cgroupv2 - +# -- Configure sysctl override described in #20072. +sysctlfix: + # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. + enabled: true # -- Configure whether to enable auto detect of terminating state for endpoints # in order to support graceful termination. enableK8sTerminatingEndpoint: true - # -- Configure whether to unload DNS policy rules on graceful shutdown # dnsPolicyUnloadOnShutdown: false # -- Configure the key of the taint indicating that Cilium is not ready on the node. # When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up. agentNotReadyTaintKey: "node.cilium.io/agent-not-ready" - dnsProxy: + # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. + socketLingerTimeout: 10 # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'. dnsRejectResponseCode: refused # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present. 
@@ -3361,12 +3332,10 @@ dnsProxy: proxyResponseMaxDelay: 100ms # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults) # enableTransparentMode: true - # -- SCTP Configuration Values sctp: # -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming. enabled: false - # Configuration for types of authentication for Cilium (beta) authentication: # -- Enable authentication processing and garbage collection. @@ -3377,11 +3346,11 @@ authentication: queueSize: 1024 # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers. rotatedIdentitiesQueueSize: 1024 - # -- Interval for garbage collection of auth map entries. + # -- Interval for garbage collection of auth map entries. gcInterval: "5m0s" # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes. # Note that this is not full mTLS support without also enabling encryption of some form. - # Current encryption options are Wireguard or IPSec, configured in encryption block above. + # Current encryption options are WireGuard or IPsec, configured in encryption block above. 
mutual: # -- Port on the agent where mutual authentication handshakes between agents will be performed port: 4250 @@ -3404,20 +3373,26 @@ authentication: existingNamespace: false # -- init container image of SPIRE agent and server initImage: + # @schema + # type: [null, string] + # @schema override: ~ repository: "docker.io/library/busybox" tag: "1.36.1" - digest: "sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b" + digest: "sha256:9ae97d36d26566ff84e8893c64a6dc4fe8ca6d1144bf5b87b2b85a32def253c7" useDigest: true pullPolicy: "IfNotPresent" # SPIRE agent configuration agent: # -- SPIRE agent image image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "ghcr.io/spiffe/spire-agent" - tag: "1.8.5" - digest: "sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b" + tag: "1.9.6" + digest: "sha256:5106ac601272a88684db14daf7f54b9a45f31f77bb16a906bd5e87756ee7b97c" useDigest: true pullPolicy: "IfNotPresent" # -- SPIRE agent service account @@ -3435,17 +3410,17 @@ authentication: # to allow the Cilium agent on this node to connect to SPIRE. 
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - key: node.kubernetes.io/not-ready - effect: NoSchedule - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - - key: node.cloudprovider.kubernetes.io/uninitialized - effect: NoSchedule - value: "true" - - key: CriticalAddonsOnly - operator: "Exists" + - key: node.kubernetes.io/not-ready + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + - key: node.cloudprovider.kubernetes.io/uninitialized + effect: NoSchedule + value: "true" + - key: CriticalAddonsOnly + operator: "Exists" # -- SPIRE agent affinity configuration affinity: {} # -- SPIRE agent nodeSelector configuration @@ -3462,10 +3437,13 @@ authentication: server: # -- SPIRE server image image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "ghcr.io/spiffe/spire-server" - tag: "1.8.5" - digest: "sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428" + tag: "1.9.6" + digest: "sha256:59a0b92b39773515e25e68a46c40d3b931b9c1860bc445a79ceb45a805cab8b4" useDigest: true pullPolicy: "IfNotPresent" # -- SPIRE server service account @@ -3502,6 +3480,9 @@ authentication: size: 1Gi # -- Access mode of the SPIRE server data storage accessMode: ReadWriteOnce + # @schema + # type: [null, string] + # @schema # -- StorageClass of the SPIRE server data storage storageClass: null # -- Security context to be added to spire server pods. @@ -3522,6 +3503,9 @@ authentication: country: "US" organization: "SPIRE" commonName: "Cilium SPIRE CA" + # @schema + # type: [null, string] + # @schema # -- SPIRE server address used by Cilium Operator # # If k8s Service DNS along with port number is used (e.g. 
..svc(.*): format), diff --git a/argocd-helm-charts/cilium/charts/cilium/values.yaml.tmpl b/argocd-helm-charts/cilium/charts/cilium/values.yaml.tmpl index 679b4cac0..e46a03943 100644 --- a/argocd-helm-charts/cilium/charts/cilium/values.yaml.tmpl +++ b/argocd-helm-charts/cilium/charts/cilium/values.yaml.tmpl @@ -1,13 +1,18 @@ -# upgradeCompatibility helps users upgrading to ensure that the configMap for +# @schema +# type: [null, string] +# @schema +# -- upgradeCompatibility helps users upgrading to ensure that the configMap for # Cilium will not change critical values to ensure continued operation # This flag is not required for new installations. -# For example: 1.7, 1.8, 1.9 -# upgradeCompatibility: '1.8' - +# For example: '1.7', '1.8', '1.9' +upgradeCompatibility: null debug: # -- Enable debug logging enabled: false + # @schema + # type: [null, string] + # @schema # -- Configure verbosity levels for debug logging # This option is used to enable debug messages for operations related to such # sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is @@ -21,45 +26,55 @@ debug: # - datapath # - policy verbose: ~ - rbac: # -- Enable creation of Resource-Based Access Control configuration. 
create: true # -- Configure image pull secrets for pulling container images -imagePullSecrets: +imagePullSecrets: [] # - name: "image-pull-secret" # -- (string) Kubernetes config path # @default -- `"~/.kube/config"` kubeConfigPath: "" -# -- (string) Kubernetes service host +# -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only) k8sServiceHost: "" +# @schema +# type: [string, integer] +# @schema # -- (string) Kubernetes service port k8sServicePort: "" - # -- Configure the client side rate limit for the agent and operator # # If the amount of requests to the Kubernetes API server exceeds the configured # rate limit, the agent and operator will start to throttle requests by delaying # them until there is budget or the request times out. k8sClientRateLimit: + # @schema + # type: [null, integer] + # @schema # -- (int) The sustained request rate in requests per second. # @default -- 5 for k8s up to 1.26. 10 for k8s version 1.27+ qps: + # @schema + # type: [null, integer] + # @schema # -- (int) The burst request rate in requests per second. # The rate limiter will allow short bursts with a higher rate. # @default -- 10 for k8s up to 1.26. 20 for k8s version 1.27+ burst: - cluster: # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. + # It must respect the following constraints: + # * It must contain at most 32 characters; + # * It must begin and end with a lower case alphanumeric character; + # * It may contain lower case alphanumeric characters and dashes between. + # The "default" name cannot be used if the Cluster ID is different from 0. name: default # -- (int) Unique ID of the cluster. Must be unique across all connected # clusters and in the range of 1 to 255. Only required for Cluster Mesh, # may be 0 if Cluster Mesh is not used. id: 0 - # -- Define serviceAccount names for components. # @default -- Component's fully qualified name. 
serviceAccounts: @@ -84,11 +99,6 @@ serviceAccounts: name: cilium-envoy automount: true annotations: {} - etcd: - create: true - name: cilium-etcd-operator - automount: true - annotations: {} operator: create: true name: cilium-operator @@ -126,21 +136,19 @@ serviceAccounts: name: hubble-generate-certs automount: true annotations: {} - # -- Configure termination grace period for cilium-agent DaemonSet. terminationGracePeriodSeconds: 1 - # -- Install the cilium agent resources. agent: true - # -- Agent container name. name: cilium - # -- Roll out cilium agent pods automatically when configmap is updated. rollOutCiliumPods: false - # -- Agent container image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${CILIUM_REPO}" tag: "${CILIUM_VERSION}" @@ -148,60 +156,51 @@ image: # cilium-digest digest: ${CILIUM_DIGEST} useDigest: ${USE_DIGESTS} - # -- Affinity for cilium-agent. affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium # -- Node selector for cilium-agent. nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for agent scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: -- operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- The priority class to use for cilium-agent. priorityClassName: "" - # -- DNS policy for Cilium agent pods. # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: "" - # -- Additional containers added to the cilium DaemonSet. 
extraContainers: [] - +# -- Additional initContainers added to the cilium Daemonset. +extraInitContainers: [] # -- Additional agent container arguments. extraArgs: [] - # -- Additional agent container environment variables. extraEnv: [] - # -- Additional agent hostPath mounts. extraHostPathMounts: [] - # - name: host-mnt-data - # mountPath: /host/mnt/data - # hostPath: /mnt/data - # hostPathType: Directory - # readOnly: true - # mountPropagation: HostToContainer +# - name: host-mnt-data +# mountPath: /host/mnt/data +# hostPath: /mnt/data +# hostPathType: Directory +# readOnly: true +# mountPropagation: HostToContainer # -- Additional agent volumes. extraVolumes: [] - # -- Additional agent volumeMounts. extraVolumeMounts: [] - # -- extraConfig allows you to specify additional configuration parameters to be # included in the cilium-config configmap. extraConfig: {} @@ -213,7 +212,6 @@ extraConfig: {} # -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent) annotations: {} - # -- Security Context for cilium-agent pods. podSecurityContext: # -- AppArmorProfile options for the `cilium-agent` and init containers @@ -221,23 +219,20 @@ podSecurityContext: type: "Unconfined" # -- Annotations to be added to agent pods podAnnotations: {} - # -- Labels to be added to agent pods podLabels: {} - # -- Agent resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi +# limits: +# cpu: 4000m +# memory: 4Gi +# requests: +# cpu: 100m +# memory: 512Mi # -- resources & limits for the agent init containers initResources: {} - securityContext: # -- User to run the pod with # runAsUser: 0 @@ -265,6 +260,7 @@ securityContext: - IPC_LOCK # Used in iptables. 
Consider removing once we are iptables-free - SYS_MODULE + # Needed to switch network namespaces (used for health endpoint, socket-LB). # We need it for now but might not need it for >= 5.11 specially # for the 'SYS_RESOURCE'. # In >= 5.8 there's already BPF and PERMON capabilities @@ -319,28 +315,31 @@ securityContext: # If available, SYS_ADMIN can be removed. #- PERFMON #- BPF - # -- Cilium agent update strategy updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 2 - # Configuration Values for cilium-agent - aksbyocni: # -- Enable AKS BYOCNI integration. # Note that this is incompatible with AKS clusters not created in BYOCNI mode: # use Azure integration (`azure.enabled`) instead. enabled: false - +# @schema +# type: [boolean, string] +# @schema # -- Enable installation of PodCIDR routes between worker # nodes if worker nodes share a common L2 network segment. autoDirectNodeRoutes: false - +# -- Enable skipping of PodCIDR routes between worker +# nodes if the worker nodes are in a different L2 network segment. +directRoutingSkipUnreachable: false # -- Annotate k8s node upon initialization with Cilium's metadata. annotateK8sNode: false - azure: # -- Enable Azure integration. # Note that this is incompatible with AKS clusters created in BYOCNI mode: use @@ -353,11 +352,9 @@ azure: # clientID: 00000000-0000-0000-0000-000000000000 # clientSecret: 00000000-0000-0000-0000-000000000000 # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000 - alibabacloud: # -- Enable AlibabaCloud ENI integration enabled: false - # -- Enable bandwidth manager to optimize TCP and UDP workloads and allow # for rate-limiting traffic from individual Pods with EDT (Earliest Departure # Time) through the "kubernetes.io/egress-bandwidth" Pod annotation. 
@@ -366,19 +363,16 @@ bandwidthManager: enabled: false # -- Activate BBR TCP congestion control for Pods bbr: false - # -- Configure standalone NAT46/NAT64 gateway nat46x64Gateway: # -- Enable RFC8215-prefixed translation enabled: false - # -- EnableHighScaleIPcache enables the special ipcache mode for high scale # clusters. The ipcache content will be reduced to the strict minimum and # traffic will be encapsulated to carry security identities. highScaleIPcache: # -- Enable the high scale mode for the ipcache. enabled: false - # -- Configure L2 announcements l2announcements: # -- Enable L2 announcements @@ -389,14 +383,12 @@ l2announcements: # leaseRenewDeadline: 5s # -- The timeout between retries if renewal fails # leaseRetryPeriod: 2s - # -- Configure L2 pod announcements l2podAnnouncements: # -- Enable L2 pod announcements enabled: false # -- Interface used for sending Gratuitous ARP pod announcements interface: "eth0" - # -- Configure BGP bgp: # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside @@ -407,7 +399,6 @@ bgp: loadbalancerIP: false # -- Enable announcement of node pod CIDR podCIDR: false - # -- This feature set enables virtual BGP routers to be created via # CiliumBGPPeeringPolicy CRDs. bgpControlPlane: @@ -419,12 +410,10 @@ bgpControlPlane: create: false # -- The name of the secret namespace to which Cilium agents are given read access name: kube-system - pmtuDiscovery: # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to # the client. enabled: false - bpf: autoMount: # -- Enable automatic mount of BPF filesystem @@ -436,122 +425,154 @@ bpf: enabled: true # -- Configure the mount point for the BPF filesystem root: /sys/fs/bpf - # -- Enables pre-allocation of eBPF map values. This increases # memory usage but can reduce latency. preallocateMaps: false - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries in auth map. 
# @default -- `524288` authMapMax: ~ - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries in the TCP connection tracking # table. # @default -- `524288` ctTcpMax: ~ - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries for the non-TCP connection # tracking table. # @default -- `262144` ctAnyMax: ~ - + # -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. + events: + drop: + # -- Enable drop events. + enabled: true + policyVerdict: + # -- Enable policy verdict events. + enabled: true + trace: + # -- Enable trace events. + enabled: true + # @schema + # type: [null, integer] + # @schema # -- Configure the maximum number of service entries in the # load balancer maps. lbMapMax: 65536 - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries for the NAT table. # @default -- `524288` natMax: ~ - + # @schema + # type: [null, integer] + # @schema # -- (int) Configure the maximum number of entries for the neighbor table. # @default -- `524288` neighMax: ~ - # @schema # type: [null, integer] # @schema # @default -- `16384` # -- (int) Configures the maximum number of entries for the node table. nodeMapMax: ~ - # -- Configure the maximum number of entries in endpoint policy map (per endpoint). # @schema # type: [null, integer] # @schema policyMapMax: 16384 - + # @schema + # type: [null, number] + # @schema # -- (float64) Configure auto-sizing for all BPF maps based on available memory. # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/ # @default -- `0.0025` mapDynamicSizeRatio: ~ - # -- Configure the level of aggregation for monitor notifications. # Valid options are none, low, medium, maximum. monitorAggregation: medium - # -- Configure the typical time between monitor notifications for # active connections. 
monitorInterval: "5s" - # -- Configure which TCP flags trigger notifications when seen for the # first time in a connection. monitorFlags: "all" - # -- Allow cluster external access to ClusterIP services. lbExternalClusterIP: false - + # @schema + # type: [null, boolean] + # @schema # -- (bool) Enable native IP masquerade support in eBPF # @default -- `false` masquerade: ~ - + # @schema + # type: [null, boolean] + # @schema # -- (bool) Configure whether direct routing mode should route traffic via # host stack (true) or directly and more efficiently out of BPF (false) if # the kernel supports it. The latter has the implication that it will also # bypass netfilter in the host namespace. # @default -- `false` hostLegacyRouting: ~ - + # @schema + # type: [null, boolean] + # @schema # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules # for implementing Layer 7 policy. # @default -- `false` tproxy: ~ - + # @schema + # type: [null, array] + # @schema # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass. # [0] will allow all VLAN id's without any filtering. # @default -- `[]` vlanBypass: ~ - + # -- (bool) Disable ExternalIP mitigation (CVE-2020-8554) + # @default -- `false` + disableExternalIPMitigation: false + # -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on + # supported kernels. + # @default -- `true` + enableTCX: true + # -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) + # @default -- `veth` + datapathMode: veth # -- Enable BPF clock source probing for more efficient tick retrieval. bpfClockProbe: false - # -- Clean all eBPF datapath state from the initContainer of the cilium-agent # DaemonSet. # # WARNING: Use with care! cleanBpfState: false - # -- Clean all local Cilium state from the initContainer of the cilium-agent # DaemonSet. Implies cleanBpfState: true. # # WARNING: Use with care! 
cleanState: false - # -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy" # init container before launching cilium-agent. # More context can be found in the commit message of below PR # https://github.com/cilium/cilium/pull/20123 waitForKubeProxy: false - cni: # -- Install the CNI configuration and binary files into the filesystem. install: true - # -- Remove the CNI configuration and binary files on agent shutdown. Enable this # if you're removing Cilium from the cluster. Disable this to prevent the CNI # configuration file from being removed during agent upgrade, which can cause # nodes to go unmanageable. uninstall: false - + # @schema + # type: [null, string] + # @schema # -- Configure chaining on top of other CNI plugins. Possible values: # - none # - aws-cni @@ -559,34 +580,30 @@ cni: # - generic-veth # - portmap chainingMode: ~ - + # @schema + # type: [null, string] + # @schema # -- A CNI network name in to which the Cilium plugin should be added as a chained plugin. # This will cause the agent to watch for a CNI network with this network name. When it is # found, this will be used as the basis for Cilium's CNI configuration file. If this is # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode # of aws-cni implies a chainingTarget of aws-cni. chainingTarget: ~ - # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. # This ensures no Pods can be scheduled using other CNI plugins during Cilium # agent downtime. exclusive: true - # -- Configure the log file for CNI logging with retention policy of 7 days. # Disable CNI file logging by setting this field to empty explicitly. logFile: /var/run/cilium/cilium-cni.log - # -- Skip writing of the CNI configuration. This can be used if # writing of the CNI configuration is performed by external automation. 
customConf: false - # -- Configure the path to the CNI configuration directory on the host. confPath: /etc/cni/net.d - # -- Configure the path to the CNI binary directory on the host. binPath: /opt/cni/bin - # -- Specify the path to a CNI config to read from on agent start. # This can be useful if you want to manage your CNI # configuration outside of a Kubernetes environment. This parameter is @@ -602,59 +619,48 @@ cni: # -- Configure the key in the CNI ConfigMap to read the contents of # the CNI configuration from. configMapKey: cni-config - # -- Configure the path to where to mount the ConfigMap inside the agent pod. confFileMountPath: /tmp/cni-configuration - # -- Configure the path to where the CNI configuration directory is mounted # inside the agent pod. hostConfDirMountPath: /host/etc/cni/net.d - # -- Specifies the resources for the cni initContainer resources: requests: cpu: 100m memory: 10Mi - + # -- Enable route MTU for pod netns when CNI chaining is used + enableRouteMTUForCNIChaining: false # -- (string) Configure how frequently garbage collection should occur for the datapath # connection tracking table. # @default -- `"0s"` conntrackGCInterval: "" - # -- (string) Configure the maximum frequency for the garbage collection of the # connection tracking table. Only affects the automatic computation for the frequency # and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently # clean up unused identities created from ToFQDN policies. conntrackGCMaxInterval: "" - -# -- Configure container runtime specific integration. -# Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15. -containerRuntime: - # -- Enables specific integrations for container runtimes. - # Supported values: - # - crio - # - none - integration: none - # -- (string) Configure timeout in which Cilium will exit if CRDs are not available # @default -- `"5m"` crdWaitTimeout: "" - # -- Tail call hooks for custom eBPF programs. 
customCalls: # -- Enable tail call hooks for custom eBPF programs. enabled: false - daemon: # -- Configure where Cilium runtime state should be stored. runPath: "/var/run/cilium" - + # @schema + # type: [null, string] + # @schema # -- Configure a custom list of possible configuration override sources # The default is "config-map:cilium-config,cilium-node-config". For supported # values, see the help text for the build-config subcommand. # Note that this value should be a comma-separated string. configSources: ~ - + # @schema + # type: [null, string] + # @schema # -- allowedConfigOverrides is a list of config-map keys that can be overridden. # That is to say, if this value is set, config sources (excepting the first one) can # only override keys in this list. @@ -664,7 +670,9 @@ daemon: # By default, all keys may be overridden. To disable overrides, set this to "none" or # change the configSources variable. allowedConfigOverrides: ~ - + # @schema + # type: [null, string] + # @schema # -- blockedConfigOverrides is a list of config-map keys that may not be overridden. # In other words, if any of these keys appear in a configuration source excepting the # first one, they will be ignored @@ -673,7 +681,6 @@ daemon: # # By default, all keys may be overridden. blockedConfigOverrides: ~ - # -- Specify which network interfaces can run the eBPF datapath. This means # that a packet sent from a pod to a destination outside the cluster will be # masqueraded (to an output device IPv4 address), if the output device runs the @@ -685,7 +692,12 @@ daemon: # devices. When devices change the eBPF datapath is reloaded and services updated. # If "devices" is set then only those devices, or devices matching a wildcard will # be considered. -enableRuntimeDeviceDetection: false +# +# This option has been deprecated and is a no-op. 
+enableRuntimeDeviceDetection: true + +# -- Forces the auto-detection of devices, even if specific devices are explicitly listed +forceDeviceDetection: false # -- Chains to ignore when installing feeder rules. # disableIptablesFeederRules: "" @@ -699,65 +711,78 @@ enableRuntimeDeviceDetection: false # -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. # enableK8sEndpointSlice: true -# -- Enable CiliumEndpointSlice feature. +# -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead). enableCiliumEndpointSlice: false +ciliumEndpointSlice: + # -- Enable Cilium EndpointSlice feature. + enabled: false + # -- List of rate limit options to be used for the CiliumEndpointSlice controller. + # Each object in the list must have the following fields: + # nodes: Count of nodes at which to apply the rate limit. + # limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50. + # burst: The burst request rate in requests per second. The maximum burst that can be configured is 100. + rateLimits: + - nodes: 0 + limit: 10 + burst: 20 + - nodes: 100 + limit: 7 + burst: 15 + - nodes: 500 + limit: 5 + burst: 10 + envoyConfig: # -- Enable CiliumEnvoyConfig CRD # CiliumEnvoyConfig CRD can also be implicitly enabled by other options. enabled: false - # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. secretsNamespace: # -- Create secrets namespace for CiliumEnvoyConfig CRDs. create: true - # -- The name of the secret namespace to which Cilium agents are given read access. name: cilium-secrets - + # -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated. + retryInterval: 15s ingressController: # -- Enable cilium ingress controller # This will automatically set enable-envoy-config as well. 
enabled: false - # -- Set cilium ingress controller to be the default ingress controller # This will let cilium ingress controller route entries without ingress class set default: false - # -- Default ingress load balancer mode # Supported values: shared, dedicated - # For granular control, use the following annotations on the ingress resource - # ingress.cilium.io/loadbalancer-mode: shared|dedicated, + # For granular control, use the following annotations on the ingress resource: + # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). loadbalancerMode: dedicated - # -- Enforce https for host having matching TLS host in Ingress. # Incoming traffic to http listener will return 308 http error code with respective location in header. enforceHttps: true - # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. enableProxyProtocol: false - # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service - ingressLBAnnotationPrefixes: ['service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com'] - + ingressLBAnnotationPrefixes: ['lbipam.cilium.io', 'nodeipam.cilium.io', 'service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com'] + # @schema + # type: [null, string] + # @schema # -- Default secret namespace for ingresses without .spec.tls[].secretName set. defaultSecretNamespace: - + # @schema + # type: [null, string] + # @schema # -- Default secret name for ingresses without .spec.tls[].secretName set. defaultSecretName: - # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. secretsNamespace: # -- Create secrets namespace for Ingress. create: true - # -- Name of Ingress secret namespace. 
name: cilium-secrets - # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. # If disabled, TLS secrets must be maintained externally. sync: true - # -- Load-balancer service in shared mode. # This is a single load-balancer service for all Ingress resources. service: @@ -769,130 +794,153 @@ ingressController: annotations: {} # -- Service type for the shared LB service type: LoadBalancer + # @schema + # type: [null, integer] + # @schema # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service insecureNodePort: ~ + # @schema + # type: [null, integer] + # @schema # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service - secureNodePort : ~ + secureNodePort: ~ + # @schema + # type: [null, string] + # @schema # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema # -- Configure a specific loadBalancerIP on the shared LB service - loadBalancerIP : ~ + loadBalancerIP: ~ + # @schema + # type: [null, boolean] + # @schema # -- Configure if node port allocation is required for LB service # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation allocateLoadBalancerNodePorts: ~ - + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. + # Valid values are "Cluster" and "Local". + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # -- Configure a specific port on the host network that gets used for the shared listener. 
+ sharedListenerPort: 8080 + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} gatewayAPI: # -- Enable support for Gateway API in cilium # This will automatically set enable-envoy-config as well. enabled: false + # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. + enableProxyProtocol: false + # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. + enableAppProtocol: false + # -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. + # Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. + enableAlpn: false + # -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. + xffNumTrustedHops: 0 + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". + # Note that this value will be ignored when `hostNetwork.enabled == true`. + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + + gatewayClass: + # -- Enable creation of GatewayClass resource + # The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. + # Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. + create: auto # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. 
secretsNamespace: # -- Create secrets namespace for Gateway API. create: true - # -- Name of Gateway API secret namespace. name: cilium-secrets - # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. # If disabled, TLS secrets must be maintained externally. sync: true + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} # -- Enables the fallback compatibility solution for when the xt_socket kernel # module is missing and it is needed for the datapath L7 redirection to work # properly. See documentation for details on when this can be disabled: # https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. enableXTSocketFallback: true - encryption: # -- Enable transparent network encryption. enabled: false - # -- Encryption method. Can be either ipsec or wireguard. type: ipsec - # -- Enable encryption for pure node to node traffic. # This option is only effective when encryption.type is set to "wireguard". nodeEncryption: false - # -- Configure the WireGuard Pod2Pod strict mode. strictMode: # -- Enable WireGuard Pod2Pod strict mode. enabled: false - # -- CIDR for the WireGuard Pod2Pod strict mode. cidr: "" - # -- Allow dynamic lookup of remote node identities. # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. allowRemoteNodeIdentities: false - ipsec: # -- Name of the key file inside the Kubernetes secret configured via secretName. - keyFile: "" - + keyFile: keys # -- Path to mount the secret inside the Cilium pod. 
- mountPath: "" - + mountPath: /etc/ipsec # -- Name of the Kubernetes secret containing the encryption keys. - secretName: "" - + secretName: cilium-ipsec-keys # -- The interface to use for encrypted traffic. interface: "" - # -- Enable the key watcher. If disabled, a restart of the agent will be # necessary on key rotations. keyWatcher: true - # -- Maximum duration of the IPsec key rotation. The previous key will be # removed after that delay. keyRotationDuration: "5m" - + # -- Enable IPsec encrypted overlay + encryptedOverlay: false wireguard: - # -- Enables the fallback to the user-space implementation. + # -- Enables the fallback to the user-space implementation (deprecated). userspaceFallback: false - # -- Controls Wireguard PersistentKeepalive option. Set 0s to disable. + # -- Controls WireGuard PersistentKeepalive option. Set 0s to disable. persistentKeepalive: 0s - - # -- Deprecated in favor of encryption.ipsec.keyFile. To be removed in 1.15. - # Name of the key file inside the Kubernetes secret configured via secretName. - # This option is only effective when encryption.type is set to ipsec. - keyFile: keys - - # -- Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15. - # Path to mount the secret inside the Cilium pod. - # This option is only effective when encryption.type is set to ipsec. - mountPath: /etc/ipsec - - # -- Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15. - # Name of the Kubernetes secret containing the encryption keys. - # This option is only effective when encryption.type is set to ipsec. - secretName: cilium-ipsec-keys - - # -- Deprecated in favor of encryption.ipsec.interface. To be removed in 1.15. - # The interface to use for encrypted traffic. - # This option is only effective when encryption.type is set to ipsec. - interface: "" - endpointHealthChecking: # -- Enable connectivity health checking between virtual endpoints. enabled: true - -# -- Enable endpoint status. 
-# Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space. -endpointStatus: - enabled: false - status: "" - endpointRoutes: + # @schema + # type: [boolean, string] + # @schema # -- Enable use of per endpoint routes instead of routing via # the cilium_host interface. enabled: false - k8sNetworkPolicy: # -- Enable support for K8s NetworkPolicy enabled: true - eni: # -- Enable Elastic Network Interface (ENI) integration. enabled: false @@ -931,47 +979,42 @@ eni: # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances # are going to be used to create new ENIs instanceTagsFilter: [] - externalIPs: # -- Enable ExternalIPs service support. enabled: false - # fragmentTracking enables IPv4 fragment tracking support in the datapath. # fragmentTracking: true - gke: # -- Enable Google Kubernetes Engine integration enabled: false - # -- Enable connectivity health checking. healthChecking: true - # -- TCP port for the agent health API. This is not the port for cilium-health. healthPort: 9879 - # -- Configure the host firewall. hostFirewall: # -- Enables the enforcement of host policies in the eBPF datapath. enabled: false - hostPort: # -- Enable hostPort service support. enabled: false - # -- Configure socket LB socketLB: # -- Enable socket LB enabled: false - # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules. # hostNamespaceOnly: false - + # -- Enable terminating pod connections to deleted service backends. + # terminatePodConnections: true # -- Configure certificate generation for Hubble integration. # If hubble.tls.auto.method=cronJob, these values are used # for the Kubernetes CronJob which will be scheduled regularly to # (re)generate any certificates not provided manually. 
certgen: image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${CERTGEN_REPO}" tag: "${CERTGEN_VERSION}" @@ -989,23 +1032,17 @@ certgen: # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- Additional certgen volumes. extraVolumes: [] - # -- Additional certgen volumeMounts. extraVolumeMounts: [] - # -- Affinity for certgen affinity: {} - hubble: # -- Enable Hubble (true by default). enabled: true - # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble) annotations: {} - # -- Buffer size of the channel Hubble uses to receive monitor events. If this # value is not set, the queue size is set to the default monitor queue size. # eventQueueSize: "" @@ -1020,6 +1057,9 @@ hubble: # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics # for more comprehensive documentation about Hubble metrics. metrics: + # @schema + # type: [null, array] + # @schema # -- Configures the list of metrics to collect. If empty or null, metrics # are disabled. # Example: @@ -1041,6 +1081,32 @@ hubble: enableOpenMetrics: false # -- Configure the port the hubble metric server listens on. port: 9965 + tls: + # Enable hubble metrics server TLS. + enabled: false + # Configure hubble metrics server TLS. + server: + # -- base64 encoded PEM values for the Hubble metrics server certificate. + cert: "" + # -- base64 encoded PEM values for the Hubble metrics server key. + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- Configure mTLS for the Hubble metrics server. + mtls: + # When set to true enforces mutual TLS between Hubble Metrics server and its clients. + # False allow non-mutual TLS connections. + # This option has no effect when TLS is disabled. 
+ enabled: false + useSecret: false + # -- Name of the ConfigMap containing the CA to validate client certificates against. + # If mTLS is enabled and this is unspecified, it will default to the + # same CA used for Hubble metrics server certificates. + name: ~ + # -- Entry of the ConfigMap containing the CA. + key: ca.crt # -- Annotations to be added to hubble-metrics service. serviceAnnotations: {} serviceMonitor: @@ -1062,21 +1128,29 @@ hubble: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor hubble metricRelabelings: ~ + # Configure TLS for the ServiceMonitor. + # Note, when using TLS you will either need to specify + # tlsConfig.insecureSkipVerify or specify a CA to use. + tlsConfig: {} # -- Grafana dashboards for hubble # grafana can import dashboards based on the label and value # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards dashboards: enabled: false label: grafana_dashboard + # @schema + # type: [null, string] + # @schema namespace: ~ labelValue: "1" annotations: {} - # -- Unix domain socket path to listen to when Hubble is enabled. socketPath: /var/run/cilium/hubble.sock - # -- Enables redacting sensitive information present in Layer 7 flows. redact: enabled: false @@ -1151,17 +1225,18 @@ hubble: # --set hubble.redact.enabled="true" # --set hubble.redact.kafka.apiKey="true" apiKey: false - # -- An additional address for Hubble to listen to. # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that # Hubble is listening on port 4244. listenAddress: ":4244" # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. preferIpv6: false + # @schema + # type: [null, boolean] + # @schema # -- (bool) Skip Hubble events with unknown cgroup ids # @default -- `true` skipUnknownCGroupIDs: ~ - peerService: # -- Service Port for the Peer service. 
# If not set, it is dynamically assigned to port 443 if TLS is enabled and to @@ -1204,7 +1279,6 @@ hubble: # Defaults to midnight of the first day of every fourth month. For syntax, see # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax schedule: "0 0 1 */4 *" - # [Example] # certManagerIssuerRef: # group: cert-manager.io @@ -1212,7 +1286,6 @@ hubble: # name: ca-issuer # -- certmanager issuer used when hubble.tls.auto.method=certmanager. certManagerIssuerRef: {} - # -- base64 encoded PEM values for the Hubble server certificate and private key server: cert: "" @@ -1221,99 +1294,91 @@ hubble: extraDnsNames: [] # -- Extra IP addresses added to certificate when it's auto generated extraIpAddresses: [] - relay: # -- Enable Hubble Relay (requires hubble.enabled=true) enabled: false - # -- Roll out Hubble Relay pods automatically when configmap is updated. rollOutPods: false - # -- Hubble-relay container image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${HUBBLE_RELAY_REPO}" tag: "${CILIUM_VERSION}" - # hubble-relay-digest + # hubble-relay-digest digest: ${HUBBLE_RELAY_DIGEST} useDigest: ${USE_DIGESTS} pullPolicy: "${PULL_POLICY}" - # -- Specifies the resources for the hubble-relay pods resources: {} - # -- Number of replicas run for the hubble-relay deployment. 
replicas: 1 - # -- Affinity for hubble-replay affinity: podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium # -- Pod topology spread constraints for hubble-relay topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- Additional hubble-relay environment variables. extraEnv: [] - # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay) annotations: {} - # -- Annotations to be added to hubble-relay pods podAnnotations: {} - # -- Labels to be added to hubble-relay pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- The priority class to use for hubble-relay priorityClassName: "" - # -- Configure termination grace period for hubble relay Deployment. 
terminationGracePeriodSeconds: 1 - # -- hubble-relay update strategy updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 1 - # -- Additional hubble-relay volumes. extraVolumes: [] - # -- Additional hubble-relay volumeMounts. extraVolumeMounts: [] - # -- hubble-relay pod security context podSecurityContext: fsGroup: 65532 - # -- hubble-relay container security context securityContext: # readOnlyRootFilesystem: true @@ -1322,21 +1387,17 @@ hubble: runAsGroup: 65532 capabilities: drop: - - ALL - + - ALL # -- hubble-relay service configuration. service: # --- The type of service used for Hubble Relay access, either ClusterIP or NodePort. type: ClusterIP # --- The port to use when the service type is set to NodePort. nodePort: 31234 - # -- Host to listen to. Specify an empty string to bind to all the interfaces. listenHost: "" - # -- Port to listen to. listenPort: "4245" - # -- TLS configuration for Hubble Relay tls: # -- base64 encoded PEM values for the hubble-relay client certificate and private key @@ -1363,26 +1424,33 @@ hubble: # -- extra IP addresses added to certificate when its auto gen extraIpAddresses: [] # DNS name used by the backend to connect to the relay - # This is a simple workaround as the relay certificates are currently hardcoded to - # *.hubble-relay.cilium.io + # This is a simple workaround as the relay certificates are currently hardcoded to + # *.hubble-relay.cilium.io # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546 # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local relayName: "ui.hubble-relay.cilium.io" - + # @schema + # type: [null, string] + # @schema # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). dialTimeout: ~ - + # @schema + # type: [null, string] + # @schema # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s"). 
retryTimeout: ~ - - # -- Max number of flows that can be buffered for sorting before being sent to the + # @schema + # type: [null, integer] + # @schema + # -- (int) Max number of flows that can be buffered for sorting before being sent to the # client (per request) (e.g. 100). sortBufferLenMax: ~ - + # @schema + # type: [null, string] + # @schema # -- When the per-request flows sort buffer is not full, a flow is drained every # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s"). sortBufferDrainTimeout: ~ - # -- Port to use for the k8s service backed by hubble-relay pods. # If not set, it is dynamically assigned to port 443 if TLS is enabled and to # port 80 if not. @@ -1406,17 +1474,21 @@ hubble: # -- Specify the Kubernetes namespace where Prometheus expects to find # service monitors configured. # namespace: "" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor hubble-relay relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor hubble-relay metricRelabelings: ~ - gops: # -- Enable gops for hubble-relay enabled: true # -- Configure gops listen port for hubble-relay port: 9893 - pprof: # -- Enable pprof for hubble-relay enabled: false @@ -1424,38 +1496,33 @@ hubble: address: localhost # -- Configure pprof listen port for hubble-relay port: 6062 - ui: # -- Whether to enable the Hubble UI. enabled: false - standalone: # -- When true, it will allow installing the Hubble UI only, without checking dependencies. # It is useful if a cluster already has cilium and Hubble relay installed and you just # want Hubble UI to be deployed. 
# When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` enabled: false - tls: # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required # to provide a volume for mounting the client certificates. certsVolume: {} - # projected: - # defaultMode: 0400 - # sources: - # - secret: - # name: hubble-ui-client-certs - # items: - # - key: tls.crt - # path: client.crt - # - key: tls.key - # path: client.key - # - key: ca.crt - # path: hubble-relay-ca.crt - + # projected: + # defaultMode: 0400 + # sources: + # - secret: + # name: hubble-ui-client-certs + # items: + # - key: tls.crt + # path: client.crt + # - key: tls.key + # path: client.key + # - key: ca.crt + # path: hubble-relay-ca.crt # -- Roll out Hubble-ui pods automatically when configmap is updated. rollOutPods: false - tls: # -- base64 encoded PEM values used to connect to hubble-relay # This keypair is presented to Hubble Relay instances for mTLS @@ -1464,37 +1531,32 @@ hubble: client: cert: "" key: "" - backend: # -- Hubble-ui backend image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${HUBBLE_UI_BACKEND_REPO}" tag: "${HUBBLE_UI_BACKEND_VERSION}" digest: "${HUBBLE_UI_BACKEND_DIGEST}" useDigest: true pullPolicy: "${PULL_POLICY}" - # -- Hubble-ui backend security context. securityContext: {} - # -- Additional hubble-ui backend environment variables. extraEnv: [] - # -- Additional hubble-ui backend volumes. extraVolumes: [] - # -- Additional hubble-ui backend volumeMounts. extraVolumeMounts: [] - livenessProbe: # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) enabled: false - readinessProbe: # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) enabled: false - # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. 
resources: {} # limits: @@ -1503,29 +1565,26 @@ hubble: # requests: # cpu: 100m # memory: 64Mi - frontend: # -- Hubble-ui frontend image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${HUBBLE_UI_FRONTEND_REPO}" tag: "${HUBBLE_UI_FRONTEND_VERSION}" digest: "${HUBBLE_UI_FRONTEND_DIGEST}" useDigest: true pullPolicy: "${PULL_POLICY}" - # -- Hubble-ui frontend security context. securityContext: {} - # -- Additional hubble-ui frontend environment variables. extraEnv: [] - # -- Additional hubble-ui frontend volumes. extraVolumes: [] - # -- Additional hubble-ui frontend volumeMounts. extraVolumeMounts: [] - # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. resources: {} # limits: @@ -1538,63 +1597,60 @@ hubble: # -- Controls server listener for ipv6 ipv6: enabled: true - # -- The number of replicas of Hubble UI to deploy. replicas: 1 - # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui) annotations: {} - # -- Annotations to be added to hubble-ui pods podAnnotations: {} - # -- Labels to be added to hubble-ui pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. 
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- Affinity for hubble-ui affinity: {} - # -- Pod topology spread constraints for hubble-ui topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- The priority class to use for hubble-ui priorityClassName: "" - # -- hubble-ui update strategy. updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 1 - # -- Security context to be added to Hubble UI pods securityContext: runAsUser: 1001 runAsGroup: 1001 fsGroup: 1001 - # -- hubble-ui service configuration. service: # -- Annotations to be added for the Hubble UI service @@ -1603,18 +1659,16 @@ hubble: type: ClusterIP # --- The port to use when the service type is set to NodePort. nodePort: 31235 - # -- Defines base url prefix for all hubble-ui http requests. # It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. # Trailing `/` is required for custom path, ex. `/service-map/` baseUrl: "/" - # -- hubble-ui ingress configuration. 
ingress: enabled: false annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" className: "" hosts: - chart-example.local @@ -1623,7 +1677,6 @@ hubble: # - secretName: chart-example-tls # hosts: # - chart-example.local - # -- Hubble flows export. export: # --- Defines max file size of output file before it gets rotated. @@ -1657,35 +1710,44 @@ hubble: createConfigMap: true # ---- Exporters configuration in YAML format. content: - - name: all - fieldMask: [] - includeFilters: [] - excludeFilters: [] - filePath: "/var/run/cilium/hubble/events.log" - #- name: "test002" - # filePath: "/var/log/network/flow-log/pa/test002.log" - # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] - # includeFilters: - # - source_pod: ["default/"] - # event_type: - # - type: 1 - # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] - # excludeFilters: [] - # end: "2023-10-09T23:59:59-07:00" + - name: all + fieldMask: [] + includeFilters: [] + excludeFilters: [] + filePath: "/var/run/cilium/hubble/events.log" + # - name: "test002" + # filePath: "/var/log/network/flow-log/pa/test002.log" + # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] + # includeFilters: + # - source_pod: ["default/"] + # event_type: + # - type: 1 + # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] + # excludeFilters: [] + # end: "2023-10-09T23:59:59-07:00" + + # -- Emit v1.Events related to pods on detection of packet drops. + # This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. + dropEventEmitter: + enabled: false + # --- Minimum time between emitting same events. + interval: 2m + # --- Drop reasons to emit events for. 
+ # ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason + reasons: + - auth_required + - policy_denied # -- Method to use for identity allocation (`crd` or `kvstore`). identityAllocationMode: "crd" - # -- (string) Time to wait before using new identity on endpoint identity change. # @default -- `"5s"` identityChangeGracePeriod: "" - # -- Install Iptables rules to skip netfilter connection tracking on all pod # traffic. This option is only effective when Cilium is running in direct # routing and full KPR mode. Moreover, this option cannot be enabled when Cilium # is running in a managed Kubernetes environment or in a chained CNI setup. installNoConntrackIptablesRules: false - ipam: # -- Configure IP Address Management mode. # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ @@ -1693,75 +1755,85 @@ ipam: # -- Maximum rate at which the CiliumNode custom resource is updated. ciliumNodeUpdateRate: "15s" operator: + # @schema + # type: [array, string] + # @schema # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"] # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. clusterPoolIPv4MaskSize: 24 + # @schema + # type: [array, string] + # @schema # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. clusterPoolIPv6PodCIDRList: ["fd00::/104"] # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. clusterPoolIPv6MaskSize: 120 # -- IP pools to auto-create in multi-pool IPAM mode. autoCreateCiliumPodIPPools: {} - # default: - # ipv4: - # cidrs: - # - 10.10.0.0/8 - # maskSize: 24 - # other: - # ipv6: - # cidrs: - # - fd00:100::/80 - # maskSize: 96 - # -- The maximum burst size when rate limiting access to external APIs. 
+ # default: + # ipv4: + # cidrs: + # - 10.10.0.0/8 + # maskSize: 24 + # other: + # ipv6: + # cidrs: + # - fd00:100::/80 + # maskSize: 96 + # @schema + # type: [null, integer] + # @schema + # -- (int) The maximum burst size when rate limiting access to external APIs. # Also known as the token bucket capacity. # @default -- `20` externalAPILimitBurstSize: ~ - # -- The maximum queries per second when rate limiting access to + # @schema + # type: [null, number] + # @schema + # -- (float) The maximum queries per second when rate limiting access to # external APIs. Also known as the bucket refill rate, which is used to # refill the bucket up to the burst size capacity. # @default -- `4.0` externalAPILimitQPS: ~ - +nodeIPAM: + # -- Configure Node IPAM + # ref: https://docs.cilium.io/en/stable/network/node-ipam/ + enabled: false +# @schema +# type: [null, string] +# @schema # -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API apiRateLimit: ~ - # -- Configure the eBPF-based ip-masq-agent ipMasqAgent: enabled: false # the config of nonMasqueradeCIDRs # config: - # nonMasqueradeCIDRs: [] - # masqLinkLocal: false - # masqLinkLocalIPv6: false +# nonMasqueradeCIDRs: [] +# masqLinkLocal: false +# masqLinkLocalIPv6: false # iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. # iptablesLockTimeout: "5s" - ipv4: # -- Enable IPv4 support. enabled: true - ipv6: # -- Enable IPv6 support. 
enabled: false - # -- Configure Kubernetes specific configuration -k8s: {} +k8s: # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR # range via the Kubernetes node resource - # requireIPv4PodCIDR: false - + requireIPv4PodCIDR: false # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR # range via the Kubernetes node resource - # requireIPv6PodCIDR: false - + requireIPv6PodCIDR: false # -- Keep the deprecated selector labels when deploying Cilium DaemonSet. keepDeprecatedLabels: false - # -- Keep the deprecated probes when deploying Cilium DaemonSet keepDeprecatedProbes: false - startupProbe: # -- failure threshold of startup probe. # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) @@ -1778,9 +1850,8 @@ readinessProbe: failureThreshold: 3 # -- interval between checks of the readiness probe periodSeconds: 30 - # -- Configure the kube-proxy replacement in Cilium BPF datapath -# Valid options are "true", "false", "disabled" (deprecated), "partial" (deprecated), "strict" (deprecated). +# Valid options are "true" or "false". # ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ #kubeProxyReplacement: "false" @@ -1789,19 +1860,15 @@ readinessProbe: # addresses and this '[::]:10256' for all ipv6 addresses. # By default it is disabled. kubeProxyReplacementHealthzBindAddr: "" - l2NeighDiscovery: # -- Enable L2 neighbor discovery in the agent enabled: true # -- Override the agent's default neighbor resolution refresh period. refreshPeriod: "30s" - # -- Enable Layer 7 network policy. l7Proxy: true - # -- Enable Local Redirect Policy. 
localRedirectPolicy: false - # To include or exclude matched resources from cilium identity evaluation # labels: "" @@ -1811,56 +1878,45 @@ localRedirectPolicy: false # -- Enables periodic logging of system load logSystemLoad: false - # -- Configure maglev consistent hashing maglev: {} - # -- tableSize is the size (parameter M) for the backend table of one - # service entry - # tableSize: +# -- tableSize is the size (parameter M) for the backend table of one +# service entry +# tableSize: - # -- hashSeed is the cluster-wide base64 encoded seed for the hashing - # hashSeed: +# -- hashSeed is the cluster-wide base64 encoded seed for the hashing +# hashSeed: # -- Enables masquerading of IPv4 traffic leaving the node from endpoints. enableIPv4Masquerade: true - # -- Enables masquerading of IPv6 traffic leaving the node from endpoints. enableIPv6Masquerade: true - # -- Enables masquerading to the source of the route for traffic leaving the node from endpoints. enableMasqueradeRouteSource: false - # -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods enableIPv4BIGTCP: false - # -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods enableIPv6BIGTCP: false - egressGateway: # -- Enables egress gateway to redirect and SNAT the traffic that leaves the # cluster. enabled: false - # -- Deprecated without a replacement necessary. - installRoutes: false # -- Time between triggers of egress gateway state reconciliations reconciliationTriggerInterval: 1s # -- Maximum number of entries in egress gateway policy map # maxPolicyEntries: 16384 - vtep: -# -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow -# Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. + # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow + # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. 
enabled: false - -# -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" + # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" endpoint: "" -# -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" + # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" cidr: "" -# -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" + # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" mask: "" -# -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" + # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" mac: "" - # -- (string) Allows to explicitly specify the IPv4 CIDR for native routing. # When specified, Cilium assumes networking for this CIDR is preconfigured and # hands traffic destined for that range to the Linux network stack without @@ -1872,7 +1928,6 @@ vtep: # the user must configure the routes to reach pods, either manually or by # setting the auto-direct-node-routes flag. ipv4NativeRoutingCIDR: "" - # -- (string) Allows to explicitly specify the IPv6 CIDR for native routing. # When specified, Cilium assumes networking for this CIDR is preconfigured and # hands traffic destined for that range to the Linux network stack without @@ -1884,12 +1939,10 @@ ipv4NativeRoutingCIDR: "" # the user must configure the routes to reach pods, either manually or by # setting the auto-direct-node-routes flag. ipv6NativeRoutingCIDR: "" - # -- cilium-monitor sidecar. monitor: # -- Enable the cilium-monitor sidecar. enabled: false - # -- Configure service load balancing loadBalancer: # -- standalone enables the standalone L4LB which does not connect to @@ -1910,7 +1963,6 @@ loadBalancer: # path), or best-effort (use native mode XDP acceleration on devices # that support it). 
acceleration: disabled - # -- dsrDispatch configures whether IP option or IPIP encapsulation is # used to pass a service IP and port to remote backend # dsrDispatch: opt @@ -1939,40 +1991,47 @@ loadBalancer: # service annotation (e.g. service.cilium.io/lb-l7-algorithm) # Applicable values: round_robin, least_request, random algorithm: round_robin - # -- Configure N-S k8s service loadbalancing nodePort: # -- Enable the Cilium NodePort service implementation. enabled: false - # -- Port range to use for NodePort services. # range: "30000,32767" + # @schema + # type: [null, string, array] + # @schema + # -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. + # By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. + # + # Example: + # + # addresses: ["192.168.1.0/24", "2001::/64"] + # + addresses: ~ + # -- Set to true to prevent applications binding to service ports. bindProtection: true - # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral # ports is detected. autoProtectPortRange: true - # -- Enable healthcheck nodePort server for NodePort services enableHealthCheck: true - # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs # EnableHealthCheck to be enabled enableHealthCheckLoadBalancerIP: false - # policyAuditMode: false # -- The agent can be put into one of the three policy enforcement modes: # default, always and never. # ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes policyEnforcementMode: "default" - +# @schema +# type: [null, string, array] +# @schema # -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. # The possible value is "nodes". 
policyCIDRMatchMode: - pprof: # -- Enable pprof for cilium-agent enabled: false @@ -1980,7 +2039,6 @@ pprof: address: localhost # -- Configure pprof listen port for cilium-agent port: 6060 - # -- Configure prometheus metrics on the configured port at /metrics prometheus: enabled: false @@ -2006,17 +2064,21 @@ prometheus: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor cilium-agent metricRelabelings: ~ # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying trustCRDsExist: false - + # @schema + # type: [null, array] + # @schema # -- Metrics that should be enabled or disabled from the default metric list. # The list is expected to be separated by a space. (+metric_foo to enable # metric_foo , -metric_bar to disable metric_bar). # ref: https://docs.cilium.io/en/stable/observability/metrics/ metrics: ~ - # --- Enable controller group metrics for monitoring specific Cilium # subsystems. The list is a list of controller group names. The special # values of "all" and "none" are supported. The set of controller @@ -2025,40 +2087,36 @@ prometheus: - write-cni-file - sync-host-ips - sync-lb-maps-with-k8s-services - # -- Grafana dashboards for cilium-agent # grafana can import dashboards based on the label and value # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards dashboards: enabled: false label: grafana_dashboard + # @schema + # type: [null, string] + # @schema namespace: ~ labelValue: "1" annotations: {} - -# -- Configure Istio proxy options. -proxy: - - prometheus: - # -- Deprecated in favor of envoy.prometheus.enabled - enabled: true - # -- Deprecated in favor of envoy.prometheus.port - port: ~ - # -- Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecarImageRegex: "cilium/istio_proxy" - # Configure Cilium Envoy options. 
envoy: + # @schema + # type: [null, boolean] + # @schema # -- Enable Envoy Proxy in standalone DaemonSet. - enabled: false - + # This field is enabled by default for new installation. + # @default -- `true` for new installation + enabled: ~ + # -- (int) + # Set Envoy'--base-id' to use when allocating shared memory regions. + # Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0' + baseID: 0 log: # -- The format string to use for laying out the log message metadata of Envoy. format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v" # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout. path: "" - # -- Time in seconds after which a TCP connection attempt times out connectTimeoutSeconds: 2 # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy @@ -2072,58 +2130,53 @@ envoy: xffNumTrustedHopsL7PolicyIngress: 0 # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. xffNumTrustedHopsL7PolicyEgress: 0 - # -- Envoy container image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${CILIUM_ENVOY_REPO}" tag: "${CILIUM_ENVOY_VERSION}" pullPolicy: "${PULL_POLICY}" digest: "${CILIUM_ENVOY_DIGEST}" useDigest: true - # -- Additional containers added to the cilium Envoy DaemonSet. extraContainers: [] - # -- Additional envoy container arguments. extraArgs: [] - # -- Additional envoy container environment variables. extraEnv: [] - # -- Additional envoy hostPath mounts. 
extraHostPathMounts: [] - # - name: host-mnt-data - # mountPath: /host/mnt/data - # hostPath: /mnt/data - # hostPathType: Directory - # readOnly: true - # mountPropagation: HostToContainer + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer # -- Additional envoy volumes. extraVolumes: [] - # -- Additional envoy volumeMounts. extraVolumeMounts: [] - # -- Configure termination grace period for cilium-envoy DaemonSet. terminationGracePeriodSeconds: 1 - # -- TCP port for the health API. healthPort: 9878 - # -- cilium-envoy update strategy # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxUnavailable: 2 # -- Roll out cilium envoy pods automatically when configmap is updated. rollOutPods: false - # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) annotations: {} - # -- Security Context for cilium-envoy pods. podSecurityContext: # -- AppArmorProfile options for the `cilium-agent` and init containers @@ -2131,19 +2184,17 @@ envoy: type: "Unconfined" # -- Annotations to be added to envoy pods podAnnotations: {} - # -- Labels to be added to envoy pods podLabels: {} - # -- Envoy resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi startupProbe: # -- failure threshold of startup probe. 
@@ -2161,7 +2212,6 @@ envoy: failureThreshold: 3 # -- interval between checks of the readiness probe periodSeconds: 30 - securityContext: # -- User to run the pod with # runAsUser: 0 @@ -2175,7 +2225,13 @@ envoy: # type available on the system. type: 'spc_t' capabilities: - # -- Capabilities for the `cilium-envoy` container + # -- Capabilities for the `cilium-envoy` container. + # Even though granted to the container, the cilium-envoy-starter wrapper drops + # all capabilities after forking the actual Envoy process. + # `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by + # setting `envoy.securityContext.capabilities.keepNetBindService=true` (in addition to granting the + # capability to the container). + # Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. envoy: # Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT - NET_ADMIN @@ -2188,15 +2244,16 @@ envoy: # If available, SYS_ADMIN can be removed. #- PERFMON #- BPF - + # -- Keep capability `NET_BIND_SERVICE` for Envoy process. + keepCapNetBindService: false # -- Affinity for cilium-envoy. affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium-envoy + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium-envoy podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - topologyKey: kubernetes.io/hostname @@ -2214,23 +2271,33 @@ envoy: # -- Node selector for cilium-envoy. 
nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for envoy scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # @schema + # type: [null, string] + # @schema # -- The priority class to use for cilium-envoy. priorityClassName: ~ - + # @schema + # type: [null, string] + # @schema # -- DNS policy for Cilium envoy pods. # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: ~ - + debug: + admin: + # -- Enable admin interface for cilium-envoy. + # This is useful for debugging and should not be enabled in production. + enabled: false + # -- Port number (bound to loopback interface). + # kubectl port-forward can be used to access the admin interface. + port: 9901 # -- Configure Cilium Envoy Prometheus options. # Note that some of these apply to either cilium-agent or cilium-envoy. prometheus: @@ -2258,16 +2325,17 @@ envoy: - __meta_kubernetes_pod_node_name targetLabel: node replacement: ${1} + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy # or for cilium-agent with Envoy configured. metricRelabelings: ~ # -- Serve prometheus metrics for cilium-envoy on the configured port port: "9964" -# -- Enable use of the remote node identity. -# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity -# Deprecated without replacement in 1.15. To be removed in 1.16. -remoteNodeIdentity: true +# -- Enable/Disable use of node label based identity +nodeSelectorLabels: false # -- Enable resource quotas for priority classes used in the cluster. 
resourceQuotas: @@ -2280,7 +2348,6 @@ resourceQuotas: hard: # 15 "clusterwide" Cilium Operator pods for HA pods: "15" - # Need to document default ################## #sessionAffinity: false @@ -2289,13 +2356,10 @@ resourceQuotas: # uninstall Cilium as it will stop Cilium from starting and create artifacts # in the node. sleepAfterInit: false - # -- Enable check of service source ranges (currently, only for LoadBalancer). svcSourceRangeCheck: true - # -- Synchronize Kubernetes nodes to kvstore and perform CNP GC. synchronizeK8sNodes: true - # -- Configure TLS configuration in the agent. tls: # -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies @@ -2304,7 +2368,6 @@ tls: # - local # - k8s secretsBackend: local - # -- Base64 encoded PEM values for the CA certificate and private key. # This can be used as common CA to generate certificates used by hubble and clustermesh components. # It is neither required nor used when cert-manager is used to generate the certificates. @@ -2312,30 +2375,23 @@ tls: # -- Optional CA cert. If it is provided, it will be used by cilium to # generate all other certificates. Otherwise, an ephemeral CA is generated. cert: "" - # -- Optional CA private key. If it is provided, it will be used by cilium to # generate all other certificates. Otherwise, an ephemeral CA is generated. key: "" - # -- Generated certificates validity duration in days. This will be used for auto generated CA. certValidityDuration: 1095 - # -- Configure the CA trust bundle used for the validation of the certificates # leveraged by hubble and clustermesh. When enabled, it overrides the content of the # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time. caBundle: # -- Enable the use of the CA trust bundle. enabled: false - # -- Name of the ConfigMap containing the CA trust bundle. name: cilium-root-ca.crt - # -- Entry of the ConfigMap containing the CA trust bundle. 
key: ca.crt - # -- Use a Secret instead of a ConfigMap. useSecret: false - # If uncommented, creates the ConfigMap and fills it with the specified content. # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace. # @@ -2346,7 +2402,6 @@ tls: # -----BEGIN CERTIFICATE----- # ... # -----END CERTIFICATE----- - # -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. # Possible values: # - "" @@ -2354,7 +2409,6 @@ tls: # - geneve # @default -- `"vxlan"` tunnelProtocol: "" - # -- Enable native-routing mode or tunneling mode. # Possible values: # - "" @@ -2362,142 +2416,43 @@ tunnelProtocol: "" # - tunnel # @default -- `"tunnel"` routingMode: "" - # -- Configure VXLAN and Geneve tunnel port. # @default -- Port 8472 for VXLAN, Port 6081 for Geneve tunnelPort: 0 - # -- Configure what the response should be to traffic for a service without backends. # "reject" only works on kernels >= 5.10, on lower kernels we fallback to "drop". # Possible values: # - reject (default) # - drop serviceNoBackendResponse: reject - # -- Configure the underlying network MTU to overwrite auto-detected MTU. +# This value doesn't change the host network interface MTU i.e. eth0 or ens0. +# It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, +# cilium_vxlan and lxc_health interfaces. MTU: 0 - # -- Disable the usage of CiliumEndpoint CRD. disableEndpointCRD: false - wellKnownIdentities: # -- Enable the use of well-known identities. enabled: false - etcd: # -- Enable etcd mode for the agent. enabled: false - - # -- cilium-etcd-operator image. - image: - override: ~ - repository: "${CILIUM_ETCD_OPERATOR_REPO}" - tag: "${CILIUM_ETCD_OPERATOR_VERSION}" - digest: "${CILIUM_ETCD_OPERATOR_DIGEST}" - useDigest: true - pullPolicy: "${PULL_POLICY}" - - # -- The priority class to use for cilium-etcd-operator - priorityClassName: "" - - # -- Additional cilium-etcd-operator container arguments. 
- extraArgs: [] - - # -- Additional cilium-etcd-operator volumes. - extraVolumes: [] - - # -- Additional cilium-etcd-operator volumeMounts. - extraVolumeMounts: [] - - # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints - # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - # -- Pod topology spread constraints for cilium-etcd-operator - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Node labels for cilium-etcd-operator pod assignment - # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector - nodeSelector: - kubernetes.io/os: linux - - # -- Annotations to be added to all top-level etcd-operator objects (resources under templates/etcd-operator) - annotations: {} - - # -- Security context to be added to cilium-etcd-operator pods - podSecurityContext: {} - - # -- Annotations to be added to cilium-etcd-operator pods - podAnnotations: {} - - # -- Labels to be added to cilium-etcd-operator pods - podLabels: {} - - # PodDisruptionBudget settings - podDisruptionBudget: - # -- enable PodDisruptionBudget - # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - enabled: false - # -- Minimum number/percentage of pods that should remain scheduled. 
- # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` - minAvailable: null - # -- Maximum number/percentage of pods that may be made unavailable - maxUnavailable: 1 - - # -- cilium-etcd-operator resource limits & requests - # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi - - # -- Security context to be added to cilium-etcd-operator pods - securityContext: {} - # runAsUser: 0 - - # -- cilium-etcd-operator update strategy - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - - # -- If etcd is behind a k8s service set this option to true so that Cilium - # does the service translation automatically without requiring a DNS to be - # running. - k8sService: false - - # -- Cluster domain for cilium-etcd-operator. - clusterDomain: cluster.local - - # -- List of etcd endpoints (not needed when using managed=true). + # -- List of etcd endpoints endpoints: - https://CHANGE-ME:2379 - - # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if - # managed=true) + # -- Enable use of TLS/SSL for connectivity to etcd. ssl: false - operator: # -- Enable the cilium-operator component (required). enabled: true - # -- Roll out cilium-operator pods automatically when configmap is updated. rollOutPods: false - # -- cilium-operator image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${CILIUM_OPERATOR_BASE_REPO}" tag: "${CILIUM_VERSION}" @@ -2512,126 +2467,116 @@ operator: useDigest: ${USE_DIGESTS} pullPolicy: "${PULL_POLICY}" suffix: "${CILIUM_OPERATOR_SUFFIX}" - # -- Number of replicas to run for the cilium-operator deployment replicas: 2 - # -- The priority class to use for cilium-operator priorityClassName: "" - # -- DNS policy for Cilium operator pods. 
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy dnsPolicy: "" - # -- cilium-operator update strategy updateStrategy: type: RollingUpdate rollingUpdate: + # @schema + # type: [integer, string] + # @schema maxSurge: 25% + # @schema + # type: [integer, string] + # @schema maxUnavailable: 50% - # -- Affinity for cilium-operator affinity: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - io.cilium/app: operator - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator # -- Pod topology spread constraints for cilium-operator topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for cilium-operator pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for cilium-operator scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- Additional cilium-operator container arguments. extraArgs: [] - # -- Additional cilium-operator environment variables. extraEnv: [] - # -- Additional cilium-operator hostPath mounts. 
extraHostPathMounts: [] - # - name: host-mnt-data - # mountPath: /host/mnt/data - # hostPath: /mnt/data - # hostPathType: Directory - # readOnly: true - # mountPropagation: HostToContainer + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer # -- Additional cilium-operator volumes. extraVolumes: [] - # -- Additional cilium-operator volumeMounts. extraVolumeMounts: [] - # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator) annotations: {} - + # -- HostNetwork setting + hostNetwork: true # -- Security context to be added to cilium-operator pods podSecurityContext: {} - # -- Annotations to be added to cilium-operator pods podAnnotations: {} - # -- Labels to be added to cilium-operator pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- cilium-operator resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 1000m - # memory: 1Gi - # requests: - # cpu: 100m - # memory: 128Mi + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi # -- Security context to be added to cilium-operator pods securityContext: {} - # runAsUser: 0 + # runAsUser: 0 # -- Interval for endpoint garbage collection. endpointGCInterval: "5m0s" - # -- Interval for cilium node garbage collection. 
nodeGCInterval: "5m0s" - - # -- Skip CNP node status clean up at operator startup. - skipCNPStatusStartupClean: false - # -- Interval for identity garbage collection. identityGCInterval: "15m0s" - # -- Timeout for identity heartbeats. identityHeartbeatTimeout: "30m0s" - pprof: # -- Enable pprof for cilium-operator enabled: false @@ -2639,7 +2584,6 @@ operator: address: localhost # -- Configure pprof listen port for cilium-operator port: 6061 - # -- Enable prometheus metrics for cilium-operator on the configured port at # /metrics prometheus: @@ -2657,96 +2601,92 @@ operator: jobLabel: "" # -- Interval for scrape metrics. interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor cilium-operator relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor cilium-operator metricRelabelings: ~ - # -- Grafana dashboards for cilium-operator # grafana can import dashboards based on the label and value # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards dashboards: enabled: false label: grafana_dashboard + # @schema + # type: [null, string] + # @schema namespace: ~ labelValue: "1" annotations: {} - # -- Skip CRDs creation for cilium-operator skipCRDCreation: false - # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium # pod running. removeNodeTaints: true - + # @schema + # type: [null, boolean] + # @schema # -- Taint nodes where Cilium is scheduled but not running. This prevents pods # from being scheduled to nodes where Cilium is not the default CNI provider. # @default -- same as removeNodeTaints setNodeTaints: ~ - # -- Set Node condition NetworkUnavailable to 'false' with the reason # 'CiliumIsUp' for nodes that have a healthy Cilium pod. setNodeNetworkStatus: true - unmanagedPodWatcher: # -- Restart any pod that are not managed by Cilium. 
restart: true # -- Interval, in seconds, to check if there are any pods that are not # managed by Cilium. intervalSeconds: 15 - nodeinit: # -- Enable the node initialization DaemonSet enabled: false - # -- node-init image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${CILIUM_NODEINIT_REPO}" tag: "${CILIUM_NODEINIT_VERSION}" digest: "${CILIUM_NODEINIT_DIGEST}" useDigest: true pullPolicy: "${PULL_POLICY}" - # -- The priority class to use for the nodeinit pod. priorityClassName: "" - # -- node-init update strategy updateStrategy: type: RollingUpdate - # -- Additional nodeinit environment variables. extraEnv: [] - # -- Additional nodeinit volumes. extraVolumes: [] - # -- Additional nodeinit volumeMounts. extraVolumeMounts: [] - # -- Affinity for cilium-nodeinit affinity: {} - # -- Node labels for nodeinit pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for nodeinit scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - operator: Exists - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit) annotations: {} - # -- Annotations to be added to node-init pods. podAnnotations: {} - # -- Labels to be added to node-init pods. podLabels: {} # -- Security Context for cilium-node-init pods. @@ -2760,7 +2700,6 @@ nodeinit: requests: cpu: 100m memory: 100Mi - # -- Security context to be added to nodeinit pods. 
securityContext: privileged: false @@ -2779,26 +2718,25 @@ nodeinit: - SYS_ADMIN - SYS_CHROOT - SYS_PTRACE - # -- bootstrapFile is the location of the file where the bootstrap timestamp is # written by the node-init DaemonSet bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time" - # -- startup offers way to customize startup nodeinit script (pre and post position) startup: - preScript: "" - postScript: "" + preScript: "" + postScript: "" # -- prestop offers way to customize prestop nodeinit script (pre and post position) prestop: - preScript: "" - postScript: "" - + preScript: "" + postScript: "" preflight: # -- Enable Cilium pre-flight resources (required for upgrade) enabled: false - # -- Cilium pre-flight image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${CILIUM_REPO}" tag: "${CILIUM_VERSION}" @@ -2806,115 +2744,97 @@ preflight: digest: ${CILIUM_DIGEST} useDigest: ${USE_DIGESTS} pullPolicy: "${PULL_POLICY}" - # -- The priority class to use for the preflight pod. priorityClassName: "" - # -- preflight update strategy updateStrategy: type: RollingUpdate - # -- Additional preflight environment variables. extraEnv: [] - # -- Additional preflight volumes. extraVolumes: [] - # -- Additional preflight volumeMounts. 
extraVolumeMounts: [] - # -- Affinity for cilium-preflight affinity: podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: cilium - + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium # -- Node labels for preflight pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for preflight scheduling to nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - key: node.kubernetes.io/not-ready - effect: NoSchedule - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - - key: node.cloudprovider.kubernetes.io/uninitialized - effect: NoSchedule - value: "true" - - key: CriticalAddonsOnly - operator: "Exists" - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight) annotations: {} - # -- Security context to be added to preflight pods. podSecurityContext: {} - # -- Annotations to be added to preflight pods podAnnotations: {} - # -- Labels to be added to the preflight pod. podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. 
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - # -- preflight resource limits & requests # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ resources: {} - # limits: - # cpu: 4000m - # memory: 4Gi - # requests: - # cpu: 100m - # memory: 512Mi + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + readinessProbe: + # -- For how long kubelet should wait before performing the first probe + initialDelaySeconds: 5 + # -- interval between checks of the readiness probe + periodSeconds: 5 # -- Security context to be added to preflight pods securityContext: {} - # runAsUser: 0 + # runAsUser: 0 # -- Path to write the `--tofqdns-pre-cache` file to. tofqdnsPreCache: "" - # -- Configure termination grace period for preflight Deployment and DaemonSet. terminationGracePeriodSeconds: 1 - # -- By default we should always validate the installed CNPs before upgrading # Cilium. This will make sure the user will have the policies deployed in the # cluster with the right schema. validateCNPs: true - # -- Explicitly enable or disable priority class. # .Capabilities.KubeVersion is unsettable in `helm template` calls, # it depends on k8s libraries version that Helm was compiled against. # This option allows to explicitly disable setting the priority class, which # is useful for rendering charts for gke clusters in advance. enableCriticalPriorityClass: true - # disableEnvoyVersionCheck removes the check for Envoy, which can be useful # on AArch64 as the images do not currently ship a version of Envoy. #disableEnvoyVersionCheck: false - clustermesh: # -- Deploy clustermesh-apiserver for clustermesh useAPIServer: false @@ -2924,10 +2844,14 @@ clustermesh: # maximum allocatable cluster-local identities. 
# Supported values are 255 and 511. maxConnectedClusters: 255 + # -- Enable the synchronization of Kubernetes EndpointSlices corresponding to + # the remote endpoints of appropriately-annotated global services through ClusterMesh + enableEndpointSliceSynchronization: false + # -- Enable Multi-Cluster Services API support + enableMCSAPISupport: false # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config) annotations: {} - # -- Clustermesh explicit configuration. config: # -- Enable the Clustermesh explicit configuration. @@ -2957,10 +2881,12 @@ clustermesh: # cert: "" # key: "" # caCert: "" - apiserver: # -- Clustermesh API server image. image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${CLUSTERMESH_APISERVER_REPO}" tag: "${CILIUM_VERSION}" @@ -2969,6 +2895,11 @@ clustermesh: useDigest: ${USE_DIGESTS} pullPolicy: "${PULL_POLICY}" + # -- TCP port for the clustermesh-apiserver health API. + healthPort: 9880 + # -- Configuration for the clustermesh-apiserver readiness probe. + readinessProbe: {} + etcd: # The etcd binary is included in the clustermesh API server image, so the same image from above is reused. # Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is @@ -2984,11 +2915,13 @@ clustermesh: # memory: 256Mi # -- Security context to be added to clustermesh-apiserver etcd containers - securityContext: {} - + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL # -- lifecycle setting for the etcd container lifecycle: {} - init: # -- Specifies the resources for etcd init container in the apiserver resources: {} @@ -3001,43 +2934,52 @@ clustermesh: # -- Additional arguments to `clustermesh-apiserver etcdinit`. extraArgs: [] - # -- Additional environment variables to `clustermesh-apiserver etcdinit`. 
extraEnv: [] + # @schema + # enum: [Disk, Memory] + # @schema + # -- Specifies whether etcd data is stored in a temporary volume backed by + # the node's default medium, such as disk, SSD or network storage (Disk), or + # RAM (Memory). The Memory option enables improved etcd read and write + # performance at the cost of additional memory usage, which counts against + # the memory limits of the container. + storageMedium: Disk + kvstoremesh: # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved # from the remote clusters in the local etcd instance. - enabled: false + enabled: true + + # -- TCP port for the KVStoreMesh health API. + healthPort: 9881 + # -- Configuration for the KVStoreMesh readiness probe. + readinessProbe: {} # -- Additional KVStoreMesh arguments. extraArgs: [] - # -- Additional KVStoreMesh environment variables. extraEnv: [] - # -- Resource requests and limits for the KVStoreMesh container resources: {} - # requests: - # cpu: 100m - # memory: 64Mi - # limits: - # cpu: 1000m - # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M # -- Additional KVStoreMesh volumeMounts. extraVolumeMounts: [] - # -- KVStoreMesh Security context securityContext: allowPrivilegeEscalation: false capabilities: drop: - ALL - # -- lifecycle setting for the KVStoreMesh container lifecycle: {} - service: # -- The type of service used for apiserver access. type: NodePort @@ -3051,114 +2993,144 @@ clustermesh: # NodePort will be redirected to a local backend, regardless of whether the # destination node belongs to the local or the remote cluster. nodePort: 32379 - # -- Optional loadBalancer IP address to use with type LoadBalancer. 
- # loadBalancerIP: - # -- Annotations for the clustermesh-apiserver # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" - # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: "true" annotations: {} - + # @schema + # enum: [Local, Cluster] + # @schema # -- The externalTrafficPolicy of service used for apiserver access. - externalTrafficPolicy: - + externalTrafficPolicy: Cluster + # @schema + # enum: [Local, Cluster] + # @schema # -- The internalTrafficPolicy of service used for apiserver access. - internalTrafficPolicy: - + internalTrafficPolicy: Cluster + # @schema + # enum: [HAOnly, Always, Never] + # @schema + # -- Defines when to enable session affinity. + # Each replica in a clustermesh-apiserver deployment runs its own discrete + # etcd cluster. Remote clients connect to one of the replicas through a + # shared Kubernetes Service. A client reconnecting to a different backend + # will require a full resync to ensure data integrity. Session affinity + # can reduce the likelihood of this happening, but may not be supported + # by all cloud providers. + # Possible values: + # - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. + # - "Always" Always enable session affinity. + # - "Never" Never enable session affinity. Useful in environments where + # session affinity is not supported, but may lead to slightly + # degraded performance due to more frequent reconnections. + enableSessionAffinity: "HAOnly" + # @schema + # type: [null, string] + # @schema + # -- Configure a loadBalancerClass. + # Allows to configure the loadBalancerClass on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer + # (requires Kubernetes 1.24+). 
+ loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP. + # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer. + loadBalancerIP: ~ # -- Number of replicas run for the clustermesh-apiserver deployment. replicas: 1 - # -- lifecycle setting for the apiserver container lifecycle: {} - # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment terminationGracePeriodSeconds: 30 - # -- Additional clustermesh-apiserver arguments. extraArgs: [] - # -- Additional clustermesh-apiserver environment variables. extraEnv: [] - # -- Additional clustermesh-apiserver volumes. extraVolumes: [] - # -- Additional clustermesh-apiserver volumeMounts. extraVolumeMounts: [] - # -- Security context to be added to clustermesh-apiserver containers - securityContext: {} - + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL # -- Security context to be added to clustermesh-apiserver pods - podSecurityContext: {} - + podSecurityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 # -- Annotations to be added to clustermesh-apiserver pods podAnnotations: {} - # -- Labels to be added to clustermesh-apiserver pods podLabels: {} - # PodDisruptionBudget settings podDisruptionBudget: # -- enable PodDisruptionBudget # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ enabled: false + # @schema + # type: [null, integer, string] + # @schema # -- Minimum number/percentage of pods that should remain scheduled. 
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null` minAvailable: null + # @schema + # type: [null, integer, string] + # @schema # -- Maximum number/percentage of pods that may be made unavailable maxUnavailable: 1 - - # -- Resource requests and limits for the clustermesh-apiserver container of the clustermesh-apiserver deployment, such as - # resources: - # limits: - # cpu: 1000m - # memory: 1024M - # requests: - # cpu: 100m - # memory: 64Mi # -- Resource requests and limits for the clustermesh-apiserver resources: {} - # requests: - # cpu: 100m - # memory: 64Mi - # limits: - # cpu: 1000m - # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M # -- Affinity for clustermesh.apiserver affinity: podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - topologyKey: kubernetes.io/hostname - labelSelector: - matchLabels: - k8s-app: clustermesh-apiserver - + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + k8s-app: clustermesh-apiserver + topologyKey: kubernetes.io/hostname # -- Pod topology spread constraints for clustermesh-apiserver topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule # -- Node labels for pod assignment # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector nodeSelector: kubernetes.io/os: linux - # -- Node tolerations for pod assignment on nodes with taints # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: [] - # -- clustermesh-apiserver update strategy updateStrategy: type: RollingUpdate rollingUpdate: - maxUnavailable: 1 - + # @schema + # type: [integer, string] + # @schema + maxSurge: 1 + # @schema + # type: [integer, string] + # @schema 
+ maxUnavailable: 0 # -- The priority class to use for clustermesh-apiserver priorityClassName: "" - tls: # -- Configure the clustermesh authentication mode. # Supported values: @@ -3176,7 +3148,13 @@ clustermesh: # if provided manually. Cluster mode is meaningful only when the same # CA is shared across all clusters part of the mesh. authMode: legacy - + # -- Allow users to provide their own certificates + # Users may need to provide their certificates using + # a mechanism that requires they provide their own secrets. + # This setting does not apply to any of the auto-generated + # mechanisms below, it only restricts the creation of secrets + # via the `tls-provided` templates. + enableSecrets: true # -- Configure automatic TLS certificates generation. # A Kubernetes CronJob is used the generate any # certificates not provided by the user at installation @@ -3240,20 +3218,17 @@ clustermesh: remote: cert: "" key: "" - # clustermesh-apiserver Prometheus metrics configuration metrics: # -- Enables exporting apiserver metrics in OpenMetrics format. enabled: true # -- Configure the port the apiserver metric server listens on. port: 9962 - kvstoremesh: # -- Enables exporting KVStoreMesh metrics in OpenMetrics format. enabled: true - # -- Configure the port the KVStoreMesh metric server listens on. + # -- Configure the port the KVStoreMesh metric server listens on. port: 9964 - etcd: # -- Enables exporting etcd metrics in OpenMetrics format. enabled: true @@ -3261,7 +3236,6 @@ clustermesh: mode: basic # -- Configure the port the etcd metric server listens on. port: 9963 - serviceMonitor: # -- Enable service monitor. 
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) @@ -3276,32 +3250,46 @@ clustermesh: # -- Interval for scrape metrics (apiserver metrics) interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) metricRelabelings: ~ - kvstoremesh: # -- Interval for scrape metrics (KVStoreMesh metrics) interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) metricRelabelings: ~ - etcd: # -- Interval for scrape metrics (etcd metrics) interval: "10s" + # @schema + # type: [null, array] + # @schema # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) relabelings: ~ + # @schema + # type: [null, array] + # @schema # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) metricRelabelings: ~ - # -- Configure external workloads support externalWorkloads: # -- Enable support for external workloads, such as VMs (false by default). 
enabled: false - # -- Configure cgroup related configuration cgroup: autoMount: @@ -3314,27 +3302,30 @@ cgroup: enabled: true # -- Init Container Cgroup Automount resource limits & requests resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) hostRoot: /run/cilium/cgroupv2 - +# -- Configure sysctl override described in #20072. +sysctlfix: + # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. + enabled: true # -- Configure whether to enable auto detect of terminating state for endpoints # in order to support graceful termination. enableK8sTerminatingEndpoint: true - # -- Configure whether to unload DNS policy rules on graceful shutdown # dnsPolicyUnloadOnShutdown: false # -- Configure the key of the taint indicating that Cilium is not ready on the node. # When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up. agentNotReadyTaintKey: "node.cilium.io/agent-not-ready" - dnsProxy: + # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. + socketLingerTimeout: 10 # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'. dnsRejectResponseCode: refused # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present. 
@@ -3358,12 +3349,10 @@ dnsProxy: proxyResponseMaxDelay: 100ms # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults) # enableTransparentMode: true - # -- SCTP Configuration Values sctp: # -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming. enabled: false - # Configuration for types of authentication for Cilium (beta) authentication: # -- Enable authentication processing and garbage collection. @@ -3374,11 +3363,11 @@ authentication: queueSize: 1024 # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers. rotatedIdentitiesQueueSize: 1024 - # -- Interval for garbage collection of auth map entries. + # -- Interval for garbage collection of auth map entries. gcInterval: "5m0s" # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes. # Note that this is not full mTLS support without also enabling encryption of some form. - # Current encryption options are Wireguard or IPSec, configured in encryption block above. + # Current encryption options are WireGuard or IPsec, configured in encryption block above. mutual: # -- Port on the agent where mutual authentication handshakes between agents will be performed port: 4250 @@ -3401,6 +3390,9 @@ authentication: existingNamespace: false # -- init container image of SPIRE agent and server initImage: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${SPIRE_INIT_REPO}" tag: "${SPIRE_INIT_VERSION}" @@ -3411,6 +3403,9 @@ authentication: agent: # -- SPIRE agent image image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${SPIRE_AGENT_REPO}" tag: "${SPIRE_AGENT_VERSION}" @@ -3432,17 +3427,17 @@ authentication: # to allow the Cilium agent on this node to connect to SPIRE. 
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ tolerations: - - key: node.kubernetes.io/not-ready - effect: NoSchedule - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - - key: node.cloudprovider.kubernetes.io/uninitialized - effect: NoSchedule - value: "true" - - key: CriticalAddonsOnly - operator: "Exists" + - key: node.kubernetes.io/not-ready + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + - key: node.cloudprovider.kubernetes.io/uninitialized + effect: NoSchedule + value: "true" + - key: CriticalAddonsOnly + operator: "Exists" # -- SPIRE agent affinity configuration affinity: {} # -- SPIRE agent nodeSelector configuration @@ -3459,6 +3454,9 @@ authentication: server: # -- SPIRE server image image: + # @schema + # type: [null, string] + # @schema override: ~ repository: "${SPIRE_SERVER_REPO}" tag: "${SPIRE_SERVER_VERSION}" @@ -3499,6 +3497,9 @@ authentication: size: 1Gi # -- Access mode of the SPIRE server data storage accessMode: ReadWriteOnce + # @schema + # type: [null, string] + # @schema # -- StorageClass of the SPIRE server data storage storageClass: null # -- Security context to be added to spire server pods. @@ -3519,6 +3520,9 @@ authentication: country: "US" organization: "SPIRE" commonName: "Cilium SPIRE CA" + # @schema + # type: [null, string] + # @schema # -- SPIRE server address used by Cilium Operator # # If k8s Service DNS along with port number is used (e.g. 
..svc(.*): format), From 2d05af6b55c57d6cbc0a6fc7c536cf8210ac48dd Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:40:08 +0530 Subject: [PATCH 06/25] [CI] Helm Chart Update cloudnative-pg --- CHANGELOG.md | 1 + argocd-helm-charts/cloudnative-pg/Chart.lock | 6 +- argocd-helm-charts/cloudnative-pg/Chart.yaml | 2 +- .../charts/cloudnative-pg/Chart.yaml | 4 +- .../charts/cloudnative-pg/README.md | 6 +- .../cloudnative-pg/templates/crds/crds.yaml | 5 +- .../cloudnative-pg/templates/deployment.yaml | 6 ++ .../cloudnative-pg/templates/podmonitor.yaml | 8 ++ .../charts/cloudnative-pg/templates/rbac.yaml | 4 - .../charts/cloudnative-pg/values.schema.json | 12 +++ .../charts/cloudnative-pg/values.yaml | 73 +++++++++++++++++++ 11 files changed, 114 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd9322b92..ce6c01ebc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,3 +9,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: aws-efs-csi-driver from version 3.0.6 to 3.0.7 - Updated: cert-manager from version v1.15.1 to v1.15.2 - Updated: cilium from version 1.15.6 to 1.16.0 +- Updated: cloudnative-pg from version 0.21.5 to 0.21.6 diff --git a/argocd-helm-charts/cloudnative-pg/Chart.lock b/argocd-helm-charts/cloudnative-pg/Chart.lock index 52fdc91ec..dbe739886 100644 --- a/argocd-helm-charts/cloudnative-pg/Chart.lock +++ b/argocd-helm-charts/cloudnative-pg/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: cloudnative-pg repository: https://cloudnative-pg.github.io/charts - version: 0.21.5 -digest: sha256:7131dd0161492d6dd383b24844fb78a5a831f8ff353c6eb468b033c7f37d885b -generated: "2024-07-09T02:54:18.857792509+05:30" + version: 0.21.6 +digest: sha256:0244008546bc4bb9ad7317fdd32d025c30bedcf76757cc1aff843e392fbca670 +generated: "2024-07-31T20:39:58.498205516+05:30" diff --git a/argocd-helm-charts/cloudnative-pg/Chart.yaml b/argocd-helm-charts/cloudnative-pg/Chart.yaml index 
982933534..39a7a8499 100644 --- a/argocd-helm-charts/cloudnative-pg/Chart.yaml +++ b/argocd-helm-charts/cloudnative-pg/Chart.yaml @@ -3,5 +3,5 @@ name: cloudnative-pg version: 1.0.0 dependencies: - name: cloudnative-pg - version: 0.21.5 + version: 0.21.6 repository: https://cloudnative-pg.github.io/charts diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/Chart.yaml b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/Chart.yaml index 285a127ec..ecf4c24cb 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/Chart.yaml +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.23.2 +appVersion: 1.23.3 dependencies: - alias: monitoring condition: monitoring.grafanaDashboard.create @@ -22,4 +22,4 @@ name: cloudnative-pg sources: - https://github.com/cloudnative-pg/charts type: application -version: 0.21.5 +version: 0.21.6 diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/README.md b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/README.md index 2aa7853ab..70b14e5e2 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/README.md +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/README.md @@ -1,6 +1,6 @@ # cloudnative-pg -![Version: 0.21.5](https://img.shields.io/badge/Version-0.21.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.23.2](https://img.shields.io/badge/AppVersion-1.23.2-informational?style=flat-square) +![Version: 0.21.6](https://img.shields.io/badge/Version-0.21.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.23.3](https://img.shields.io/badge/AppVersion-1.23.3-informational?style=flat-square) CloudNativePG Operator Helm Chart @@ -37,7 +37,9 @@ CloudNativePG Operator Helm Chart | config.secret | bool | 
`false` | Specifies whether it should be stored in a secret, instead of a configmap. | | containerSecurityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | Container Security Context. | | crds.create | bool | `true` | Specifies whether the CRDs should be created when installing the chart. | +| dnsPolicy | string | `""` | | | fullnameOverride | string | `""` | | +| hostNetwork | bool | `false` | | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"ghcr.io/cloudnative-pg/cloudnative-pg"` | | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | @@ -51,6 +53,8 @@ CloudNativePG Operator Helm Chart | monitoring.grafanaDashboard.sidecarLabelValue | string | `"1"` | Label value that ConfigMaps should have to be loaded as dashboards. DEPRECATED: Use labels instead. | | monitoring.podMonitorAdditionalLabels | object | `{}` | Additional labels for the podMonitor | | monitoring.podMonitorEnabled | bool | `false` | Specifies whether the monitoring should be enabled. Requires Prometheus Operator CRDs. | +| monitoring.podMonitorMetricRelabelings | list | `[]` | Metrics relabel configurations to apply to samples before ingestion. | +| monitoring.podMonitorRelabelings | list | `[]` | Relabel configurations to apply to samples before scraping. | | monitoringQueriesConfigMap.name | string | `"cnpg-default-monitoring"` | The name of the default monitoring configmap. 
| | monitoringQueriesConfigMap.queries | string | `"backends:\n query: |\n SELECT sa.datname\n , sa.usename\n , sa.application_name\n , states.state\n , COALESCE(sa.count, 0) AS total\n , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds\n FROM ( VALUES ('active')\n , ('idle')\n , ('idle in transaction')\n , ('idle in transaction (aborted)')\n , ('fastpath function call')\n , ('disabled')\n ) AS states(state)\n LEFT JOIN (\n SELECT datname\n , state\n , usename\n , COALESCE(application_name, '') AS application_name\n , COUNT(*)\n , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs\n FROM pg_catalog.pg_stat_activity\n GROUP BY datname, state, usename, application_name\n ) sa ON states.state = sa.state\n WHERE sa.usename IS NOT NULL\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - usename:\n usage: \"LABEL\"\n description: \"Name of the user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - state:\n usage: \"LABEL\"\n description: \"State of the backend\"\n - total:\n usage: \"GAUGE\"\n description: \"Number of backends\"\n - max_tx_duration_seconds:\n usage: \"GAUGE\"\n description: \"Maximum duration of a transaction in seconds\"\n\nbackends_waiting:\n query: |\n SELECT count(*) AS total\n FROM pg_catalog.pg_locks blocked_locks\n JOIN pg_catalog.pg_locks blocking_locks\n ON blocking_locks.locktype = blocked_locks.locktype\n AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database\n AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation\n AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page\n AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple\n AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid\n AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid\n AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid\n AND blocking_locks.objid IS 
NOT DISTINCT FROM blocked_locks.objid\n AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid\n AND blocking_locks.pid != blocked_locks.pid\n JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid\n WHERE NOT blocked_locks.granted\n metrics:\n - total:\n usage: \"GAUGE\"\n description: \"Total number of backends that are currently waiting on other queries\"\n\npg_database:\n query: |\n SELECT datname\n , pg_catalog.pg_database_size(datname) AS size_bytes\n , pg_catalog.age(datfrozenxid) AS xid_age\n , pg_catalog.mxid_age(datminmxid) AS mxid_age\n FROM pg_catalog.pg_database\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - size_bytes:\n usage: \"GAUGE\"\n description: \"Disk space used by the database\"\n - xid_age:\n usage: \"GAUGE\"\n description: \"Number of transactions from the frozen XID to the current one\"\n - mxid_age:\n usage: \"GAUGE\"\n description: \"Number of multiple transactions (Multixact) from the frozen XID to the current one\"\n\npg_postmaster:\n query: |\n SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time\n FROM pg_catalog.pg_postmaster_start_time()\n metrics:\n - start_time:\n usage: \"GAUGE\"\n description: \"Time at which postgres started (based on epoch)\"\n\npg_replication:\n query: \"SELECT CASE WHEN (\n NOT pg_catalog.pg_is_in_recovery()\n OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())\n THEN 0\n ELSE GREATEST (0,\n EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))\n END AS lag,\n pg_catalog.pg_is_in_recovery() AS in_recovery,\n EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,\n (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas\"\n metrics:\n - lag:\n usage: \"GAUGE\"\n description: \"Replication lag behind primary in seconds\"\n - in_recovery:\n usage: \"GAUGE\"\n description: \"Whether the instance is in recovery\"\n - 
is_wal_receiver_up:\n usage: \"GAUGE\"\n description: \"Whether the instance wal_receiver is up\"\n - streaming_replicas:\n usage: \"GAUGE\"\n description: \"Number of streaming replicas connected to the instance\"\n\npg_replication_slots:\n query: |\n SELECT slot_name,\n slot_type,\n database,\n active,\n (CASE pg_catalog.pg_is_in_recovery()\n WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)\n ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)\n END) as pg_wal_lsn_diff\n FROM pg_catalog.pg_replication_slots\n WHERE NOT temporary\n metrics:\n - slot_name:\n usage: \"LABEL\"\n description: \"Name of the replication slot\"\n - slot_type:\n usage: \"LABEL\"\n description: \"Type of the replication slot\"\n - database:\n usage: \"LABEL\"\n description: \"Name of the database\"\n - active:\n usage: \"GAUGE\"\n description: \"Flag indicating whether the slot is active\"\n - pg_wal_lsn_diff:\n usage: \"GAUGE\"\n description: \"Replication lag in bytes\"\n\npg_stat_archiver:\n query: |\n SELECT archived_count\n , failed_count\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival\n , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure\n , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time\n , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn\n , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn\n , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time\n FROM pg_catalog.pg_stat_archiver\n metrics:\n - archived_count:\n usage: \"COUNTER\"\n description: \"Number of WAL files that have been 
successfully archived\"\n - failed_count:\n usage: \"COUNTER\"\n description: \"Number of failed attempts for archiving WAL files\"\n - seconds_since_last_archival:\n usage: \"GAUGE\"\n description: \"Seconds since the last successful archival operation\"\n - seconds_since_last_failure:\n usage: \"GAUGE\"\n description: \"Seconds since the last failed archival operation\"\n - last_archived_time:\n usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving succeeded\"\n - last_failed_time:\n usage: \"GAUGE\"\n description: \"Epoch of the last time WAL archiving failed\"\n - last_archived_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Archived WAL start LSN\"\n - last_failed_wal_start_lsn:\n usage: \"GAUGE\"\n description: \"Last failed WAL LSN\"\n - stats_reset_time:\n usage: \"GAUGE\"\n description: \"Time at which these statistics were last reset\"\n\npg_stat_bgwriter:\n runonserver: \"<17.0.0\"\n query: |\n SELECT checkpoints_timed\n , checkpoints_req\n , checkpoint_write_time\n , checkpoint_sync_time\n , buffers_checkpoint\n , buffers_clean\n , maxwritten_clean\n , buffers_backend\n , buffers_backend_fsync\n , buffers_alloc\n FROM pg_catalog.pg_stat_bgwriter\n metrics:\n - checkpoints_timed:\n usage: \"COUNTER\"\n description: \"Number of scheduled checkpoints that have been performed\"\n - checkpoints_req:\n usage: \"COUNTER\"\n description: \"Number of requested checkpoints that have been performed\"\n - checkpoint_write_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds\"\n - checkpoint_sync_time:\n usage: \"COUNTER\"\n description: \"Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds\"\n - buffers_checkpoint:\n usage: \"COUNTER\"\n description: \"Number of buffers written during checkpoints\"\n - buffers_clean:\n usage: \"COUNTER\"\n 
description: \"Number of buffers written by the background writer\"\n - maxwritten_clean:\n usage: \"COUNTER\"\n description: \"Number of times the background writer stopped a cleaning scan because it had written too many buffers\"\n - buffers_backend:\n usage: \"COUNTER\"\n description: \"Number of buffers written directly by a backend\"\n - buffers_backend_fsync:\n usage: \"COUNTER\"\n description: \"Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)\"\n - buffers_alloc:\n usage: \"COUNTER\"\n description: \"Number of buffers allocated\"\n\npg_stat_database:\n query: |\n SELECT datname\n , xact_commit\n , xact_rollback\n , blks_read\n , blks_hit\n , tup_returned\n , tup_fetched\n , tup_inserted\n , tup_updated\n , tup_deleted\n , conflicts\n , temp_files\n , temp_bytes\n , deadlocks\n , blk_read_time\n , blk_write_time\n FROM pg_catalog.pg_stat_database\n metrics:\n - datname:\n usage: \"LABEL\"\n description: \"Name of this database\"\n - xact_commit:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been committed\"\n - xact_rollback:\n usage: \"COUNTER\"\n description: \"Number of transactions in this database that have been rolled back\"\n - blks_read:\n usage: \"COUNTER\"\n description: \"Number of disk blocks read in this database\"\n - blks_hit:\n usage: \"COUNTER\"\n description: \"Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)\"\n - tup_returned:\n usage: \"COUNTER\"\n description: \"Number of rows returned by queries in this database\"\n - tup_fetched:\n usage: \"COUNTER\"\n description: \"Number of rows fetched by queries in this database\"\n - tup_inserted:\n usage: \"COUNTER\"\n description: \"Number of rows inserted by queries in this database\"\n - 
tup_updated:\n usage: \"COUNTER\"\n description: \"Number of rows updated by queries in this database\"\n - tup_deleted:\n usage: \"COUNTER\"\n description: \"Number of rows deleted by queries in this database\"\n - conflicts:\n usage: \"COUNTER\"\n description: \"Number of queries canceled due to conflicts with recovery in this database\"\n - temp_files:\n usage: \"COUNTER\"\n description: \"Number of temporary files created by queries in this database\"\n - temp_bytes:\n usage: \"COUNTER\"\n description: \"Total amount of data written to temporary files by queries in this database\"\n - deadlocks:\n usage: \"COUNTER\"\n description: \"Number of deadlocks detected in this database\"\n - blk_read_time:\n usage: \"COUNTER\"\n description: \"Time spent reading data file blocks by backends in this database, in milliseconds\"\n - blk_write_time:\n usage: \"COUNTER\"\n description: \"Time spent writing data file blocks by backends in this database, in milliseconds\"\n\npg_stat_replication:\n primary: true\n query: |\n SELECT usename\n , COALESCE(application_name, '') AS application_name\n , COALESCE(client_addr::text, '') AS client_addr\n , COALESCE(client_port::text, '') AS client_port\n , EXTRACT(EPOCH FROM backend_start) AS backend_start\n , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes\n , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes\n , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes\n , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds\n , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds\n FROM pg_catalog.pg_stat_replication\n metrics:\n - 
usename:\n usage: \"LABEL\"\n description: \"Name of the replication user\"\n - application_name:\n usage: \"LABEL\"\n description: \"Name of the application\"\n - client_addr:\n usage: \"LABEL\"\n description: \"Client IP address\"\n - client_port:\n usage: \"LABEL\"\n description: \"Client TCP port\"\n - backend_start:\n usage: \"COUNTER\"\n description: \"Time when this process was started\"\n - backend_xmin_age:\n usage: \"COUNTER\"\n description: \"The age of this standby's xmin horizon\"\n - sent_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location sent on this connection\"\n - write_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location written to disk by this standby server\"\n - flush_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location flushed to disk by this standby server\"\n - replay_diff_bytes:\n usage: \"GAUGE\"\n description: \"Difference in bytes from the last write-ahead log location replayed into the database on this standby server\"\n - write_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it\"\n - flush_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it\"\n - replay_lag_seconds:\n usage: \"GAUGE\"\n description: \"Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it\"\n\npg_settings:\n query: |\n SELECT name,\n CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting\n FROM pg_catalog.pg_settings\n WHERE vartype IN ('integer', 'real', 'bool')\n ORDER BY 1\n metrics:\n - name:\n usage: \"LABEL\"\n description: \"Name of the setting\"\n - setting:\n 
usage: \"GAUGE\"\n description: \"Setting value\"\n"` | A string representation of a YAML defining monitoring queries. | | nameOverride | string | `""` | | diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/crds/crds.yaml b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/crds/crds.yaml index 72377c3e5..0730228e7 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/crds/crds.yaml +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/crds/crds.yaml @@ -14961,11 +14961,12 @@ spec: method: default: barmanObjectStore description: |- - The backup method to be used, possible options are `barmanObjectStore` - and `volumeSnapshot`. Defaults to: `barmanObjectStore`. + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. enum: - barmanObjectStore - volumeSnapshot + - plugin type: string online: description: |- diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/deployment.yaml b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/deployment.yaml index 515d52ecc..569752007 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/deployment.yaml +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/deployment.yaml @@ -46,6 +46,12 @@ spec: imagePullSecrets: {{- toYaml . 
| nindent 8 }} {{- end }} + {{- if .Values.hostNetwork }} + hostNetwork: {{ .Values.hostNetwork }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} containers: - args: - controller diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/podmonitor.yaml b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/podmonitor.yaml index bae86ca8d..cc7bd7622 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/podmonitor.yaml +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/podmonitor.yaml @@ -18,4 +18,12 @@ spec: {{- include "cloudnative-pg.selectorLabels" . | nindent 6 }} podMetricsEndpoints: - port: metrics + {{- with .Values.monitoring.podMonitorMetricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.monitoring.podMonitorRelabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} {{- end }} diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/rbac.yaml b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/rbac.yaml index f2bf0e805..864ea0df3 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/rbac.yaml +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/templates/rbac.yaml @@ -163,18 +163,14 @@ rules: - mutatingwebhookconfigurations verbs: - get - - list - patch - - update - apiGroups: - admissionregistration.k8s.io resources: - validatingwebhookconfigurations verbs: - get - - list - patch - - update - apiGroups: - apiextensions.k8s.io resources: diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.schema.json b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.schema.json index 6c3779ac2..63fba0454 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.schema.json +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.schema.json @@ -75,9 
+75,15 @@ } } }, + "dnsPolicy": { + "type": "string" + }, "fullnameOverride": { "type": "string" }, + "hostNetwork": { + "type": "boolean" + }, "image": { "type": "object", "properties": { @@ -129,6 +135,12 @@ }, "podMonitorEnabled": { "type": "boolean" + }, + "podMonitorMetricRelabelings": { + "type": "array" + }, + "podMonitorRelabelings": { + "type": "array" } } }, diff --git a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.yaml b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.yaml index f240cb359..a95165ffd 100644 --- a/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.yaml +++ b/argocd-helm-charts/cloudnative-pg/charts/cloudnative-pg/values.yaml @@ -29,6 +29,9 @@ imagePullSecrets: [] nameOverride: "" fullnameOverride: "" +hostNetwork: false +dnsPolicy: "" + crds: # -- Specifies whether the CRDs should be created when installing the chart. create: true @@ -148,6 +151,10 @@ monitoring: # -- Specifies whether the monitoring should be enabled. Requires Prometheus Operator CRDs. podMonitorEnabled: false + # -- Metrics relabel configurations to apply to samples before ingestion. + podMonitorMetricRelabelings: [] + # -- Relabel configurations to apply to samples before scraping. 
+ podMonitorRelabelings: [] # -- Additional labels for the podMonitor podMonitorAdditionalLabels: {} @@ -248,6 +255,7 @@ monitoringQueriesConfigMap: , pg_catalog.age(datfrozenxid) AS xid_age , pg_catalog.mxid_age(datminmxid) AS mxid_age FROM pg_catalog.pg_database + WHERE datallowconn metrics: - datname: usage: "LABEL" @@ -412,6 +420,71 @@ monitoringQueriesConfigMap: usage: "COUNTER" description: "Number of buffers allocated" + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - 
restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + pg_stat_database: query: | SELECT datname From 66c474acf64fc261e9255394acf53ca672031e0c Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:41:07 +0530 Subject: [PATCH 07/25] [CI] Helm Chart Update dokuwiki --- CHANGELOG.md | 1 + argocd-helm-charts/dokuwiki/Chart.lock | 6 +++--- argocd-helm-charts/dokuwiki/Chart.yaml | 2 +- .../dokuwiki/charts/dokuwiki/Chart.lock | 6 +++--- .../dokuwiki/charts/dokuwiki/Chart.yaml | 8 ++++---- .../dokuwiki/charts/dokuwiki/README.md | 3 ++- .../charts/dokuwiki/charts/common/Chart.yaml | 4 ++-- .../charts/dokuwiki/charts/common/README.md | 2 +- .../charts/common/templates/_resources.tpl | 14 +++++++------- .../dokuwiki/charts/common/templates/_storage.tpl | 7 +++---- .../dokuwiki/charts/dokuwiki/values.yaml | 12 +++++++----- 11 files changed, 34 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce6c01ebc..e49e6d12c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,3 +10,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: cert-manager from version v1.15.1 to v1.15.2 - Updated: cilium from version 1.15.6 to 1.16.0 - Updated: cloudnative-pg from version 0.21.5 to 0.21.6 +- Updated: dokuwiki from version 16.2.6 to 16.2.10 
diff --git a/argocd-helm-charts/dokuwiki/Chart.lock b/argocd-helm-charts/dokuwiki/Chart.lock index ea83823f5..712e4419b 100644 --- a/argocd-helm-charts/dokuwiki/Chart.lock +++ b/argocd-helm-charts/dokuwiki/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: dokuwiki repository: https://charts.bitnami.com/bitnami - version: 16.2.6 -digest: sha256:c79046979b9df75b0b0c1661e1c50b2fbd1f8277dde4380057a6f0067d351e94 -generated: "2024-07-09T02:54:37.841430693+05:30" + version: 16.2.10 +digest: sha256:10616cce225d5469909bb3582f2e4ca13c2c834124c8ccdcc13ec17327dcf6bd +generated: "2024-07-31T20:40:57.169802923+05:30" diff --git a/argocd-helm-charts/dokuwiki/Chart.yaml b/argocd-helm-charts/dokuwiki/Chart.yaml index 8d77ca592..6aca4ae47 100644 --- a/argocd-helm-charts/dokuwiki/Chart.yaml +++ b/argocd-helm-charts/dokuwiki/Chart.yaml @@ -4,6 +4,6 @@ version: 12.2.10 # see latest chart here: https://artifacthub.io/packages/helm/bitnami/dokuwiki dependencies: - name: dokuwiki - version: 16.2.6 + version: 16.2.10 repository: https://charts.bitnami.com/bitnami #repository: "oci://ghcr.io/Obmondo" diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.lock b/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.lock index 04dafb550..b2f8917fa 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.lock +++ b/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.20.3 -digest: sha256:569e1c9d81abdcad3891e065c0f23c83786527d2043f2bc68193c43d18886c19 -generated: "2024-06-18T11:17:07.296636591Z" + version: 2.20.5 +digest: sha256:5b98791747a148b9d4956b81bb8635f49a0ae831869d700d52e514b8fd1a2445 +generated: "2024-07-16T12:04:59.432218+02:00" diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.yaml b/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.yaml index 6f0c2a66d..c000de557 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.yaml +++ 
b/argocd-helm-charts/dokuwiki/charts/dokuwiki/Chart.yaml @@ -2,11 +2,11 @@ annotations: category: Wiki images: | - name: apache-exporter - image: docker.io/bitnami/apache-exporter:1.0.8-debian-12-r2 + image: docker.io/bitnami/apache-exporter:1.0.8-debian-12-r6 - name: dokuwiki - image: docker.io/bitnami/dokuwiki:20240206.1.0-debian-12-r20 + image: docker.io/bitnami/dokuwiki:20240206.1.0-debian-12-r24 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r24 + image: docker.io/bitnami/os-shell:12-debian-12-r26 licenses: Apache-2.0 apiVersion: v2 appVersion: 20240206.1.0 @@ -34,4 +34,4 @@ maintainers: name: dokuwiki sources: - https://github.com/bitnami/charts/tree/main/bitnami/dokuwiki -version: 16.2.6 +version: 16.2.10 diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/README.md b/argocd-helm-charts/dokuwiki/charts/dokuwiki/README.md index a4c1802fd..2731003f3 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/README.md +++ b/argocd-helm-charts/dokuwiki/charts/dokuwiki/README.md @@ -133,7 +133,8 @@ See the [Parameters](#parameters) section to configure the PVC or to disable per | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | | `global.imageRegistry` | Global Docker image registry | `""` | | `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | | 
`global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | ### Common parameters diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/Chart.yaml b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/Chart.yaml index 23ba4e4e7..dabd80681 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/Chart.yaml +++ b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.20.3 +appVersion: 2.20.5 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts/tree/main/bitnami/common type: library -version: 2.20.3 +version: 2.20.5 diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/README.md b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/README.md index 82d78a384..fee26c991 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/README.md +++ b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/README.md @@ -24,7 +24,7 @@ data: myvalue: "Hello World" ``` -Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. 
## Introduction diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_resources.tpl b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_resources.tpl index b4491f782..d8a43e1c2 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_resources.tpl +++ b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_resources.tpl @@ -15,31 +15,31 @@ These presets are for basic testing and not meant to be used in production {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") ) "micro" (dict "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") ) "small" (dict "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") ) "medium" (dict "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") ) "large" (dict "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") ) "xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") ) "2xlarge" (dict 
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") ) }} {{- if hasKey $presets .type -}} diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_storage.tpl b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_storage.tpl index 7780da18b..aa75856c0 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_storage.tpl +++ b/argocd-helm-charts/dokuwiki/charts/dokuwiki/charts/common/templates/_storage.tpl @@ -4,19 +4,18 @@ SPDX-License-Identifier: APACHE-2.0 */}} {{/* vim: set filetype=mustache: */}} + {{/* Return the proper Storage Class {{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} */}} {{- define "common.storage.class" -}} - -{{- $storageClass := default .persistence.storageClass ((.global).storageClass) -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}} {{- if $storageClass -}} {{- if (eq "-" $storageClass) -}} {{- printf "storageClassName: \"\"" -}} - {{- else }} + {{- else -}} {{- printf "storageClassName: %s" $storageClass -}} {{- end -}} {{- end -}} - {{- end -}} diff --git a/argocd-helm-charts/dokuwiki/charts/dokuwiki/values.yaml b/argocd-helm-charts/dokuwiki/charts/dokuwiki/values.yaml index 10a00b96d..a69d043f3 100644 --- a/argocd-helm-charts/dokuwiki/charts/dokuwiki/values.yaml +++ b/argocd-helm-charts/dokuwiki/charts/dokuwiki/values.yaml @@ -9,7 +9,8 @@ ## @param global.imageRegistry Global Docker image registry ## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param 
global.storageClass DEPRECATED: use global.defaultStorageClass instead ## global: imageRegistry: "" @@ -18,6 +19,7 @@ global: ## - myRegistryKeySecretName ## imagePullSecrets: [] + defaultStorageClass: "" storageClass: "" ## Compatibility adaptations for Kubernetes platforms ## @@ -68,7 +70,7 @@ extraDeploy: [] image: registry: docker.io repository: bitnami/dokuwiki - tag: 20240206.1.0-debian-12-r20 + tag: 20240206.1.0-debian-12-r24 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -648,7 +650,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r24 + tag: 12-debian-12-r26 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -706,7 +708,7 @@ metrics: image: registry: docker.io repository: bitnami/apache-exporter - tag: 1.0.8-debian-12-r2 + tag: 1.0.8-debian-12-r6 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
@@ -790,7 +792,7 @@ certificates: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r24 + tag: 12-debian-12-r26 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' From 8c4da1fbaa4ad4813a1f2b10a5b6f6c61091ca9f Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:41:41 +0530 Subject: [PATCH 08/25] [CI] Helm Chart Update external-dns --- CHANGELOG.md | 1 + argocd-helm-charts/external-dns/Chart.lock | 6 ++--- argocd-helm-charts/external-dns/Chart.yaml | 2 +- .../charts/external-dns/Chart.lock | 6 ++--- .../charts/external-dns/Chart.yaml | 4 ++-- .../charts/external-dns/README.md | 7 +++++- .../external-dns/charts/common/Chart.yaml | 4 ++-- .../external-dns/charts/common/README.md | 2 +- .../charts/common/templates/_resources.tpl | 14 +++++------ .../charts/common/templates/_storage.tpl | 7 +++--- .../external-dns/templates/_helpers.tpl | 8 ++++--- .../external-dns/templates/deployment.yaml | 14 +++++++++-- .../charts/external-dns/values.yaml | 23 +++++++++++++++++-- 13 files changed, 67 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e49e6d12c..3cfa97b20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,3 +11,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: cilium from version 1.15.6 to 1.16.0 - Updated: cloudnative-pg from version 0.21.5 to 0.21.6 - Updated: dokuwiki from version 16.2.6 to 16.2.10 +- Updated: external-dns from version 8.1.0 to 8.3.3 diff --git a/argocd-helm-charts/external-dns/Chart.lock b/argocd-helm-charts/external-dns/Chart.lock index 2db869939..3112cf3af 100644 --- a/argocd-helm-charts/external-dns/Chart.lock +++ b/argocd-helm-charts/external-dns/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: external-dns repository: https://charts.bitnami.com/bitnami - version: 8.1.0 -digest: sha256:f61ff11e2a0bb3a7201ff118cbe8fe9acab7022f7349d30e4f6c52eb29bc3747 -generated: 
"2024-07-09T02:53:17.136731122+05:30" + version: 8.3.3 +digest: sha256:9081c27e0e01562b0926f9d6699c12fefafcfea97031231706a31be63766dd26 +generated: "2024-07-31T20:41:30.843285539+05:30" diff --git a/argocd-helm-charts/external-dns/Chart.yaml b/argocd-helm-charts/external-dns/Chart.yaml index 145dfe69d..5068df2ec 100644 --- a/argocd-helm-charts/external-dns/Chart.yaml +++ b/argocd-helm-charts/external-dns/Chart.yaml @@ -3,5 +3,5 @@ name: external-dns version: 0.10.2 dependencies: - name: external-dns - version: 8.1.0 + version: 8.3.3 repository: https://charts.bitnami.com/bitnami diff --git a/argocd-helm-charts/external-dns/charts/external-dns/Chart.lock b/argocd-helm-charts/external-dns/charts/external-dns/Chart.lock index a39e57017..77e034803 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/Chart.lock +++ b/argocd-helm-charts/external-dns/charts/external-dns/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.20.3 -digest: sha256:569e1c9d81abdcad3891e065c0f23c83786527d2043f2bc68193c43d18886c19 -generated: "2024-06-18T11:35:47.714107662Z" + version: 2.20.5 +digest: sha256:5b98791747a148b9d4956b81bb8635f49a0ae831869d700d52e514b8fd1a2445 +generated: "2024-07-23T22:57:10.60304379Z" diff --git a/argocd-helm-charts/external-dns/charts/external-dns/Chart.yaml b/argocd-helm-charts/external-dns/charts/external-dns/Chart.yaml index e69e05a99..76c0d0d34 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/Chart.yaml +++ b/argocd-helm-charts/external-dns/charts/external-dns/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: DeveloperTools images: | - name: external-dns - image: docker.io/bitnami/external-dns:0.14.2-debian-12-r4 + image: docker.io/bitnami/external-dns:0.14.2-debian-12-r7 licenses: Apache-2.0 apiVersion: v2 appVersion: 0.14.2 @@ -26,4 +26,4 @@ maintainers: name: external-dns sources: - https://github.com/bitnami/charts/tree/main/bitnami/external-dns -version: 8.1.0 
+version: 8.3.3 diff --git a/argocd-helm-charts/external-dns/charts/external-dns/README.md b/argocd-helm-charts/external-dns/charts/external-dns/README.md index af53747a7..4285195d6 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/README.md +++ b/argocd-helm-charts/external-dns/charts/external-dns/README.md @@ -127,6 +127,7 @@ helm install my-release \ | `image.digest` | ExternalDNS image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | | `image.pullPolicy` | ExternalDNS image pull policy | `IfNotPresent` | | `image.pullSecrets` | ExternalDNS image pull secrets | `[]` | +| `revisionHistoryLimit` | sets number of replicaset to keep in k8s | `10` | | `automountServiceAccountToken` | Mount Service Account token in pod | `true` | | `hostAliases` | Deployment pod host aliases | `[]` | | `updateStrategy` | update strategy type | `{}` | @@ -246,6 +247,7 @@ helm install my-release \ | `infoblox.wapiConnectionPoolSize` | When using the Infoblox provider, specify the Infoblox WAPI request connection pool size (optional) | `""` | | `infoblox.wapiHttpTimeout` | When using the Infoblox provider, specify the Infoblox WAPI request timeout in seconds (optional) | `""` | | `infoblox.maxResults` | When using the Infoblox provider, specify the Infoblox Max Results (optional) | `""` | +| `infoblox.createPtr` | When using the Infoblox provider, specify the Infoblox create PTR flag (optional) | `false` | | `linode.apiToken` | When using the Linode provider, `LINODE_TOKEN` to set (optional) | `""` | | `linode.secretName` | Use an existing secret with key "linode_api_token" defined. 
| `""` | | `ns1.minTTL` | When using the ns1 provider, specify minimal TTL, as an integer, for records | `10` | @@ -255,6 +257,8 @@ helm install my-release \ | `pihole.tlsSkipVerify` | When using the Pi-hole provider, specify wheter to skip verification of any TLS certificates served by the Pi-hole web server | `""` | | `pihole.password` | When using the Pi-hole provider, specify a password to use | `""` | | `pihole.secretName` | Use an existing secret with key "pihole_password" defined. | `""` | +| `traefik.disableNew` | Disable listeners on Resources under traefik.io | `false` | +| `traefik.disableLegacy` | Disable listeners on Resources under traefik.containo.us | `false` | | `oci.region` | When using the OCI provider, specify the region, where your zone is located in. | `""` | | `oci.tenancyOCID` | When using the OCI provider, specify your Tenancy OCID | `""` | | `oci.userOCID` | When using the OCI provider, specify your User OCID | `""` | @@ -271,6 +275,7 @@ helm install my-release \ | `ovh.secretName` | When using the OVH provider, it's the name of the secret containing `ovh_consumer_key`, `ovh_application_key` and `ovh_application_secret`. Disables usage of other `ovh`. | `""` | | `scaleway.scwAccessKey` | When using the Scaleway provider, specify an existing access key. (required when provider=scaleway) | `""` | | `scaleway.scwSecretKey` | When using the Scaleway provider, specify an existing secret key. (required when provider=scaleway) | `""` | +| `scaleway.secretName` | Use an existing secret with keys "scaleway_access_key" and "scaleway_secret_key" defined (optional). 
| `""` | | `rfc2136.host` | When using the rfc2136 provider, specify the RFC2136 host (required when provider=rfc2136) | `""` | | `rfc2136.port` | When using the rfc2136 provider, specify the RFC2136 port (optional) | `53` | | `rfc2136.zone` | When using the rfc2136 provider, specify the zone (required when provider=rfc2136) | `""` | @@ -555,4 +560,4 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License. +limitations under the License. \ No newline at end of file diff --git a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/Chart.yaml b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/Chart.yaml index 23ba4e4e7..dabd80681 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/Chart.yaml +++ b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.20.3 +appVersion: 2.20.5 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts/tree/main/bitnami/common type: library -version: 2.20.3 +version: 2.20.5 diff --git a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/README.md b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/README.md index 82d78a384..fee26c991 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/README.md +++ b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/README.md @@ -24,7 +24,7 @@ data: myvalue: "Hello World" ``` -Looking to use our applications in production? 
Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. ## Introduction diff --git a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_resources.tpl b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_resources.tpl index b4491f782..d8a43e1c2 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_resources.tpl +++ b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_resources.tpl @@ -15,31 +15,31 @@ These presets are for basic testing and not meant to be used in production {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") ) "micro" (dict "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") ) "small" (dict "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") ) "medium" (dict "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") ) "large" (dict "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") + "limits" 
(dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") ) "xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") ) "2xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") ) }} {{- if hasKey $presets .type -}} diff --git a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_storage.tpl b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_storage.tpl index 7780da18b..aa75856c0 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_storage.tpl +++ b/argocd-helm-charts/external-dns/charts/external-dns/charts/common/templates/_storage.tpl @@ -4,19 +4,18 @@ SPDX-License-Identifier: APACHE-2.0 */}} {{/* vim: set filetype=mustache: */}} + {{/* Return the proper Storage Class {{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} */}} {{- define "common.storage.class" -}} - -{{- $storageClass := default .persistence.storageClass ((.global).storageClass) -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}} {{- if $storageClass -}} {{- if (eq "-" $storageClass) -}} {{- printf "storageClassName: \"\"" -}} - {{- else }} + {{- else -}} {{- printf "storageClassName: %s" $storageClass -}} {{- end -}} {{- end -}} - {{- end -}} diff --git a/argocd-helm-charts/external-dns/charts/external-dns/templates/_helpers.tpl b/argocd-helm-charts/external-dns/charts/external-dns/templates/_helpers.tpl index 702368f76..2cfe8069c 100644 --- 
a/argocd-helm-charts/external-dns/charts/external-dns/templates/_helpers.tpl +++ b/argocd-helm-charts/external-dns/charts/external-dns/templates/_helpers.tpl @@ -122,7 +122,7 @@ Return true if a secret object should be created {{- true -}} {{- else if and (eq .Values.provider "ovh") .Values.ovh.consumerKey (not .Values.ovh.secretName) -}} {{- true -}} -{{- else if and (eq .Values.provider "scaleway") .Values.scaleway.scwAccessKey -}} +{{- else if and (eq .Values.provider "scaleway") .Values.scaleway.scwAccessKey (not .Values.scaleway.secretName) -}} {{- true -}} {{- else if and (eq .Values.provider "vinyldns") (or .Values.vinyldns.secretKey .Values.vinyldns.accessKey) -}} {{- true -}} @@ -189,6 +189,8 @@ Return the name of the Secret used to store the passwords {{- .Values.civo.secretName }} {{- else if and (eq .Values.provider "pihole") .Values.pihole.secretName }} {{- .Values.pihole.secretName }} +{{- else if and (eq .Values.provider "scaleway") .Values.scaleway.secretName }} +{{- .Values.scaleway.secretName }} {{- else -}} {{- template "external-dns.fullname" . }} {{- end -}} @@ -807,7 +809,7 @@ Validate values of External DNS: - must provide the Scaleway access key when provider is "scaleway" */}} {{- define "external-dns.validateValues.scaleway.scwAccessKey" -}} -{{- if and (eq .Values.provider "scaleway") (not .Values.scaleway.scwAccessKey) -}} +{{- if and (eq .Values.provider "scaleway") (not .Values.scaleway.scwAccessKey) (not .Values.scaleway.secretName) -}} external-dns: scaleway.scwAccessKey You must provide the Scaleway access key when provider="scaleway". 
Please set the scwAccessKey parameter (--set scaleway.scwAccessKey="xxxx") @@ -819,7 +821,7 @@ Validate values of External DNS: - must provide the scaleway secret key when provider is "scaleway" */}} {{- define "external-dns.validateValues.scaleway.scwSecretKey" -}} -{{- if and (eq .Values.provider "scaleway") (not .Values.scaleway.scwSecretKey) -}} +{{- if and (eq .Values.provider "scaleway") (not .Values.scaleway.scwSecretKey) (not .Values.scaleway.secretName) -}} external-dns: scaleway.scwSecretKey You must provide the scaleway secret key when provider="scaleway". Please set the scwSecretKey parameter (--set scaleway.scwSecretKey="xxxx") diff --git a/argocd-helm-charts/external-dns/charts/external-dns/templates/deployment.yaml b/argocd-helm-charts/external-dns/charts/external-dns/templates/deployment.yaml index d36712ff7..021403b4a 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/templates/deployment.yaml +++ b/argocd-helm-charts/external-dns/charts/external-dns/templates/deployment.yaml @@ -13,6 +13,7 @@ metadata: annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} replicas: 1 {{- if .Values.updateStrategy }} strategy: {{- toYaml .Values.updateStrategy | nindent 4 }} @@ -276,6 +277,9 @@ spec: - --no-infoblox-ssl-verify {{- else }} - --infoblox-ssl-verify + {{- end }} + {{- if .Values.infoblox.createPtr }} + - --infoblox-create-ptr {{- end }} {{- if .Values.infoblox.maxResults }} - --infoblox-max-results={{ .Values.infoblox.maxResults }} @@ -331,6 +335,12 @@ spec: - --transip-account={{ .Values.transip.account }} - --transip-keyfile=/transip/transip-api-key {{- end }} + {{- if .Values.traefik.disableLegacy }} + - --traefik-disable-legacy + {{- end }} + {{- if .Values.traefik.disableNew }} + - --traefik-disable-new + {{- end }} {{- if .Values.txtEncrypt.enabled }} # TXT registry encryption - 
--txt-encrypt-enabled @@ -574,14 +584,14 @@ spec: {{- end }} {{- if eq .Values.provider "scaleway" }} # Scaleway environment variables - {{- if .Values.scaleway.scwAccessKey }} + {{- if or (.Values.scaleway.scwAccessKey) (.Values.scaleway.secretName) }} - name: SCW_ACCESS_KEY valueFrom: secretKeyRef: name: {{ template "external-dns.secretName" . }} key: scw_access_key {{- end }} - {{- if .Values.scaleway.scwSecretKey }} + {{- if or (.Values.scaleway.scwSecretKey) (.Values.scaleway.secretName) }} - name: SCW_SECRET_KEY valueFrom: secretKeyRef: diff --git a/argocd-helm-charts/external-dns/charts/external-dns/values.yaml b/argocd-helm-charts/external-dns/charts/external-dns/values.yaml index 559349ca8..eba4a4175 100644 --- a/argocd-helm-charts/external-dns/charts/external-dns/values.yaml +++ b/argocd-helm-charts/external-dns/charts/external-dns/values.yaml @@ -69,7 +69,7 @@ watchReleaseNamespace: false image: registry: docker.io repository: bitnami/external-dns - tag: 0.14.2-debian-12-r4 + tag: 0.14.2-debian-12-r7 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -84,6 +84,9 @@ image: ## - myRegistryKeySecretName ## pullSecrets: [] +## @param revisionHistoryLimit sets number of replicaset to keep in k8s +## +revisionHistoryLimit: 10 ## @param automountServiceAccountToken Mount Service Account token in pod ## automountServiceAccountToken: true @@ -547,6 +550,9 @@ infoblox: ## @param infoblox.maxResults When using the Infoblox provider, specify the Infoblox Max Results (optional) ## maxResults: "" + ## @param infoblox.createPtr When using the Infoblox provider, specify the Infoblox create PTR flag (optional) + ## + createPtr: false ## Linode configuration to be set via arguments/env. variables ## linode: @@ -585,6 +591,15 @@ pihole: ## @param pihole.secretName Use an existing secret with key "pihole_password" defined. 
## secretName: "" +## Compatibility adaptations for Traefik +## +traefik: + ## @param traefik.disableNew Disable listeners on Resources under traefik.io + ## + disableNew: false + ## @param traefik.disableLegacy Disable listeners on Resources under traefik.containo.us + ## + disableLegacy: false ## oci configuration to be set via arguments/env. variables ## oci: @@ -650,6 +665,10 @@ scaleway: ## @param scaleway.scwSecretKey When using the Scaleway provider, specify an existing secret key. (required when provider=scaleway) ## scwSecretKey: "" + ## @param scaleway.secretName Use an existing secret with keys "scaleway_access_key" and "scaleway_secret_key" defined (optional). + ## This ignores scaleway.scwAccessKey and scaleway.scwSecretKey + ## + secretName: "" ## RFC 2136 configuration to be set via arguments/env. variables ## rfc2136: @@ -782,7 +801,7 @@ logFormat: text ## policy: upsert-only ## @param registry Registry method to use (options: txt, aws-sd, dynamodb, noop) -## ref: https://github.com/kubernetes-sigs/external-dns/blob/master/docs/proposal/registry.md +## ref: https://github.com/kubernetes-sigs/external-dns/blob/master/docs/registry/registry.md ## registry: "txt" ## @param txtPrefix When using the TXT registry, a prefix for ownership records that avoids collision with CNAME entries (optional) (Mutual exclusive with txt-suffix) From 556201b1f81b53b022e01cea0fa960ed68e5f301 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:42:27 +0530 Subject: [PATCH 09/25] [CI] Helm Chart Update fluent-bit --- CHANGELOG.md | 1 + argocd-helm-charts/fluent-bit/Chart.lock | 6 +++--- argocd-helm-charts/fluent-bit/Chart.yaml | 2 +- argocd-helm-charts/fluent-bit/charts/fluent-bit/Chart.yaml | 6 +++--- .../fluent-bit/charts/fluent-bit/templates/clusterrole.yaml | 1 + 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3cfa97b20..0af4fdeff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,3 +12,4 @@ All releases 
and the changes included in them (pulled from git commits added sin - Updated: cloudnative-pg from version 0.21.5 to 0.21.6 - Updated: dokuwiki from version 16.2.6 to 16.2.10 - Updated: external-dns from version 8.1.0 to 8.3.3 +- Updated: fluent-bit from version 0.46.11 to 0.47.5 diff --git a/argocd-helm-charts/fluent-bit/Chart.lock b/argocd-helm-charts/fluent-bit/Chart.lock index 7b030d1bf..a2f57d84d 100644 --- a/argocd-helm-charts/fluent-bit/Chart.lock +++ b/argocd-helm-charts/fluent-bit/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: fluent-bit repository: https://fluent.github.io/helm-charts - version: 0.46.11 -digest: sha256:4ed5248be069f55e2746d9e84e62b5c85083a38c71fae1815779aa6f35d9b55c -generated: "2024-07-09T02:53:22.027598894+05:30" + version: 0.47.5 +digest: sha256:d2b6cee48d6d8c45439c31d72154ada365bb4869ef597825df73bac8898afc7a +generated: "2024-07-31T20:42:15.050633673+05:30" diff --git a/argocd-helm-charts/fluent-bit/Chart.yaml b/argocd-helm-charts/fluent-bit/Chart.yaml index 4f60eacd8..f1e318409 100644 --- a/argocd-helm-charts/fluent-bit/Chart.yaml +++ b/argocd-helm-charts/fluent-bit/Chart.yaml @@ -3,5 +3,5 @@ name: fluent-bit version: 1.0.0 dependencies: - name: fluent-bit - version: 0.46.11 + version: 0.47.5 repository: https://fluent.github.io/helm-charts diff --git a/argocd-helm-charts/fluent-bit/charts/fluent-bit/Chart.yaml b/argocd-helm-charts/fluent-bit/charts/fluent-bit/Chart.yaml index f6bf7d5db..39b30e7e7 100644 --- a/argocd-helm-charts/fluent-bit/charts/fluent-bit/Chart.yaml +++ b/argocd-helm-charts/fluent-bit/charts/fluent-bit/Chart.yaml @@ -1,9 +1,9 @@ annotations: artifacthub.io/changes: | - kind: changed - description: "Updated Fluent Bit OCI image to v3.0.7." + description: "Updated Fluent Bit OCI image to v3.1.4." apiVersion: v1 -appVersion: 3.0.7 +appVersion: 3.1.4 description: Fast and lightweight log processor and forwarder or Linux, OSX and BSD family operating systems. 
home: https://fluentbit.io/ @@ -24,4 +24,4 @@ maintainers: name: fluent-bit sources: - https://github.com/fluent/fluent-bit/ -version: 0.46.11 +version: 0.47.5 diff --git a/argocd-helm-charts/fluent-bit/charts/fluent-bit/templates/clusterrole.yaml b/argocd-helm-charts/fluent-bit/charts/fluent-bit/templates/clusterrole.yaml index d44db638b..550ac8fec 100644 --- a/argocd-helm-charts/fluent-bit/charts/fluent-bit/templates/clusterrole.yaml +++ b/argocd-helm-charts/fluent-bit/charts/fluent-bit/templates/clusterrole.yaml @@ -13,6 +13,7 @@ rules: - pods {{- if .Values.rbac.nodeAccess }} - nodes + - nodes/metrics - nodes/proxy {{- end }} {{- if .Values.rbac.eventsAccess }} From 0c5a9e0d458906a0a5c9f1b4703e94bb3b20f977 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:43:13 +0530 Subject: [PATCH 10/25] [CI] Helm Chart Update gitlab-runner --- CHANGELOG.md | 1 + argocd-helm-charts/gitlab-runner/Chart.lock | 6 +- argocd-helm-charts/gitlab-runner/Chart.yaml | 2 +- .../charts/gitlab-runner/CHANGELOG.md | 32 +++-- .../charts/gitlab-runner/Chart.yaml | 4 +- .../gitlab-runner/templates/_env_vars.tpl | 20 +++ .../gitlab-runner/templates/_helpers.tpl | 15 +++ .../gitlab-runner/templates/configmap.yaml | 124 +++++++++++++----- .../gitlab-runner/templates/deployment.yaml | 22 ++-- .../templates/service-session-server.yaml | 10 +- .../charts/gitlab-runner/values.yaml | 29 +++- 11 files changed, 195 insertions(+), 70 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0af4fdeff..32ce2b011 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,3 +13,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: dokuwiki from version 16.2.6 to 16.2.10 - Updated: external-dns from version 8.1.0 to 8.3.3 - Updated: fluent-bit from version 0.46.11 to 0.47.5 +- Updated: gitlab-runner from version 0.66.0 to 0.67.1 diff --git a/argocd-helm-charts/gitlab-runner/Chart.lock b/argocd-helm-charts/gitlab-runner/Chart.lock index 
24de51903..e25aea0a4 100644 --- a/argocd-helm-charts/gitlab-runner/Chart.lock +++ b/argocd-helm-charts/gitlab-runner/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: gitlab-runner repository: https://charts.gitlab.io - version: 0.66.0 -digest: sha256:1d8cd57fb0ec7b1aeacf42cefad340cd1e9379505229ec46661462a861ae9cc4 -generated: "2024-07-09T02:54:57.788652681+05:30" + version: 0.67.1 +digest: sha256:e8bdbc4c2dc7229217ad9b4a39c91db146ffe24662e562591af6c36088a31b70 +generated: "2024-07-31T20:43:01.9040856+05:30" diff --git a/argocd-helm-charts/gitlab-runner/Chart.yaml b/argocd-helm-charts/gitlab-runner/Chart.yaml index c34c8d5e2..a4a8a6743 100644 --- a/argocd-helm-charts/gitlab-runner/Chart.yaml +++ b/argocd-helm-charts/gitlab-runner/Chart.yaml @@ -3,7 +3,7 @@ name: gitlab-runner version: 1.0.0 dependencies: - name: gitlab-runner - version: 0.66.0 + version: 0.67.1 repository: https://charts.gitlab.io #repository: "oci://ghcr.io/Obmondo" # see version here: https://gitlab.com/gitlab-org/charts/gitlab-runner/-/blob/main/Chart.yaml diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/CHANGELOG.md b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/CHANGELOG.md index 875844514..103c8942e 100644 --- a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/CHANGELOG.md +++ b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/CHANGELOG.md @@ -1,33 +1,41 @@ -## v0.66.0 (2024-06-20) +## v0.67.1 (2024-07-26) ### New features -- Update GitLab Runner version to v17.1.0 -- Make lifecycle options configurable in the deployment !473 (Marcel Eichler @marcel1802) -- Add dedicated ServiceAccount configuration !415 (Fabian Schneider @fabsrc) +- Update GitLab Runner version to v17.2.1 + +## v0.67.0 (2024-07-18) + +### New features + +- Update GitLab Runner version to v17.2.0 +- Make livenessProbe and readinessProbe configurable !483 +- Add support for different service types for session-server !476 (Ummet Civi @ummetcivi) +- Ignore timeout on verify command for the 
livenessProbe !457 (Thomas de Grenier de Latour @thomasgl-orange) ### Bug fixes -- Fix replicas value check for nil to work also with Terraform !478 (Sabyrzhan Tynybayev @sabyrzhan) -- Update list of rules to be added to the rbac role permissions as per documentation !471 (Ismael Posada Trobo @iposadat) +- Fix the register-the-runner script !479 (Jeremy Huntwork @jhuntwork) ### Maintenance - Remove registration token integration test !477 -## v0.65.0 (2024-05-23) +## v0.65.1 (2024-07-06) ### New features -- Update GitLab Runner version to v17.0.0 +- Update GitLab Runner version to v17.0.1 -### Maintenance +## v0.65.0 (2024-05-23) -- Default to https in values.yaml !470 +### New features -### Other changes +- Update GitLab Runner version to v16.11.2 -- chore: set the checkInterval value the same as in the main documents. !472 (Michel Santello @michel.santello) +### Maintenance + +- Remove registration token integration test !477 ## v0.64.1 (2024-05-03) diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/Chart.yaml b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/Chart.yaml index da5716407..6c0c9f13b 100644 --- a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/Chart.yaml +++ b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: 17.1.0 +appVersion: 17.2.1 description: GitLab Runner icon: https://gitlab.com/uploads/-/system/project/avatar/250833/runner_logo.png keywords: @@ -14,4 +14,4 @@ sources: - https://gitlab.com/gitlab-org/charts/gitlab-runner - https://gitlab.com/gitlab-org/gitlab-runner - https://docs.gitlab.com/runner/ -version: 0.66.0 +version: 0.67.1 diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_env_vars.tpl b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_env_vars.tpl index f33d8e8aa..7fd425e0d 100644 --- a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_env_vars.tpl +++ 
b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_env_vars.tpl @@ -34,4 +34,24 @@ valueFrom: {{- toYaml $value | nindent 4 }} {{- end }} +{{- if (include "gitlab-runner.isSessionServerAllowed" .)}} +- name: SESSION_SERVER_ADDRESS + {{- if .Values.sessionServer.publicIP }} + value: {{ .Values.sessionServer.publicIP }} + {{- else if eq (include "gitlab-runner.server-session-service-type" .) "ClusterIP" }} + value: {{ printf "%s.%s.svc.cluster.local" (include "gitlab-runner.server-session-service-name" .) .Release.Namespace }} + {{- else if eq (include "gitlab-runner.server-session-service-type" .) "NodePort" }} + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{- else if eq (include "gitlab-runner.server-session-service-type" .) "Headless" }} + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + {{- else }} + value: + {{- end }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_helpers.tpl b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_helpers.tpl index ad8284bc6..6017c6b62 100644 --- a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_helpers.tpl +++ b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/_helpers.tpl @@ -140,3 +140,18 @@ if the number of replicas is eq to 1 {{- define "gitlab-runner.isSessionServerAllowed" -}} {{- and (eq (default 1 (.Values.replicas | int64)) 1) .Values.sessionServer .Values.sessionServer.enabled -}} {{- end -}} + +{{/* +Define session server's service name. +*/}} +{{- define "gitlab-runner.server-session-service-name" }} +{{- printf "%s-%s" (include "gitlab-runner.fullname" .) "session-server"}} +{{- end -}} + +{{/*}} +Define the session server service type. +It's LoadBalancer by default. 
+*/}} +{{- define "gitlab-runner.server-session-service-type" }} +{{- default "LoadBalancer" .Values.sessionServer.serviceType}} +{{- end -}} \ No newline at end of file diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/configmap.yaml b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/configmap.yaml index 1ac71638e..34a404d2a 100644 --- a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/configmap.yaml +++ b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/configmap.yaml @@ -27,6 +27,10 @@ data: sh /configmaps/set-session-server-address & child=$! wait "$child" + + sh /configmaps/set-session-server-port & + child=$! + wait "$child" {{- end }} # Set up environment variables for cache @@ -98,7 +102,7 @@ data: [session_server] session_timeout = {{ include "gitlab-runner.server-session-timeout" . }} listen_address = "0.0.0.0:{{ include "gitlab-runner.server-session-internal-port" . }}" - advertise_address = "SESSION_SERVER_IP:{{ include "gitlab-runner.server-session-external-port" . }}" + advertise_address = "SESSION_SERVER_ADDRESS:SESSION_SERVER_PORT" {{- end }} {{ if .Values.runners.config }} @@ -106,15 +110,14 @@ data: {{ end }} register-the-runner: | - #!/bin/bash + #!/bin/sh signal_handler() { if [ ! -d "/proc/$register_pid" ]; then wait $register_pid fi exit } - trap 'signal_handler' SIGQUIT - trap 'signal_handler' SIGINT + trap 'signal_handler' QUIT INT MAX_REGISTER_ATTEMPTS=30 @@ -162,25 +165,80 @@ data: sleep 5 done - exit 0 - check-live: | #!/bin/bash set -eou pipefail - export CONFIG_PATH_FOR_INIT="{{ ternary "/etc/gitlab-runner/" "/home/gitlab-runner/.gitlab-runner/" (and (hasKey .Values.podSecurityContext "runAsUser") (eq 0 (.Values.podSecurityContext.runAsUser | int64))) }}" + # default timeout is 3 seconds, can be overriden + VERIFY_TIMEOUT=${1:-${VERIFY_TIMEOUT:-3}} if ! /usr/bin/pgrep -f ".*register-the-runner" > /dev/null && ! 
/usr/bin/pgrep -f "gitlab.*runner" > /dev/null ; then exit 1 fi - awk -F'"' '/^ name = ".*"/ { print $2 }' "${CONFIG_PATH_FOR_INIT}/config.toml" | xargs -I{} gitlab-runner verify -n {} 2>&1 | grep -E "is alive|is valid" + status=0 + # empty --url= helps `gitlab-runner verify` select all configured runners (otherwise filters for $CI_SERVER_URL) + verify_output=$(timeout "${VERIFY_TIMEOUT}" gitlab-runner verify --url= 2>&1) || status=$? + + # timeout exit code is 143 with busybox, and 124 with coreutils + if (( status == 143 )) || (( status == 124 )) ; then + echo "'gitlab-runner verify' terminated by timeout, not a conclusive failure" >&2 + exit 0 + elif (( status > 0 )) ; then + exit ${status} + fi + + grep -qE "is (alive|valid)" <<<"${verify_output}" {{- if eq (include "gitlab-runner.isSessionServerAllowed" . ) "true" }} set-session-server-address: | #!/bin/bash - {{- if (not .Values.sessionServer.publicIP) }} + if [[ -n "${SESSION_SERVER_ADDRESS}" ]]; then + sed -i -e "s/SESSION_SERVER_ADDRESS/$SESSION_SERVER_ADDRESS/g" ${CONFIG_PATH_FOR_INIT}/config.toml + else + APISERVER=https://kubernetes.default.svc \ + && SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount \ + && NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) \ + && TOKEN=$(cat ${SERVICEACCOUNT}/token) \ + && CACERT=${SERVICEACCOUNT}/ca.crt \ + && header="Authorization: Bearer ${TOKEN}" + + SERVICEURL=${APISERVER}/api/v1/namespaces/${NAMESPACE}/services/{{ include "gitlab-runner.server-session-service-name" . 
}} + + has_address=false + while [ "${has_address}" = false ]; do + SERVICEIP=$(curl —-silent \ + --cacert ${CACERT} \ + --header "${header}" \ + -X GET ${SERVICEURL} 2>/dev/null \ + | grep '"ip":' | cut -d ":" -f2 | xargs) + + # for aws, the hostname is available but not the external IP + SERVICEHOSTNAME=$(curl —-silent \ + --cacert ${CACERT} \ + --header "${header}" \ + -X GET ${SERVICEURL} 2>/dev/null \ + | grep '"hostname":' | cut -d ":" -f2 | xargs) + + ADDRESS="${SERVICEHOSTNAME:-$SERVICEIP}" + + if [ -z "${ADDRESS}" ] + then + echo "Service LoadBalancer External Address not yet available" + has_address=false + sleep 5 + else + has_address=true + sed -i -e "s/SESSION_SERVER_ADDRESS/$ADDRESS/g" ${CONFIG_PATH_FOR_INIT}/config.toml + fi + done + fi + + set-session-server-port: | + #!/bin/bash + + {{- if and (eq (include "gitlab-runner.server-session-service-type" .) "NodePort") (not .Values.sessionServer.nodePort)}} APISERVER=https://kubernetes.default.svc \ && SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount \ && NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) \ @@ -188,38 +246,32 @@ data: && CACERT=${SERVICEACCOUNT}/ca.crt \ && header="Authorization: Bearer ${TOKEN}" - SERVICEURL=${APISERVER}/api/v1/namespaces/${NAMESPACE}/services/{{ include "gitlab-runner.fullname" . 
}}-session-server - - has_address=false - while [ "${has_address}" = false ]; do - SERVICEIP=$(curl —-silent \ - --cacert ${CACERT} \ - --header "${header}" \ - -X GET ${SERVICEURL} 2>/dev/null \ - | grep '"ip":' | cut -d ":" -f2 | xargs) - - # for aws, the hostname is available but not the external IP - SERVICEHOSTNAME=$(curl —-silent \ - --cacert ${CACERT} \ - --header "${header}" \ - -X GET ${SERVICEURL} 2>/dev/null \ - | grep '"hostname":' | cut -d ":" -f2 | xargs) - - ADDRESS="${SERVICEHOSTNAME:-$SERVICEIP}" - - if [ -z "${ADDRESS}" ] - then - echo "Service LoadBalancer External Address not yet available" - has_address=false + SERVICEURL=${APISERVER}/api/v1/namespaces/${NAMESPACE}/services/{{ include "gitlab-runner.server-session-service-name" . }} + + has_port=false + while [ "${has_port}" = false ]; do + SERVICEPORT=$(curl —-silent \ + --cacert ${CACERT} \ + --header "${header}" \ + -X GET ${SERVICEURL} 2>/dev/null \ + | grep '"nodePort":' | cut -d ":" -f2 | xargs) + if [ -z "${SERVICEPORT}" ]; then + echo "Service nodePort not yet available" sleep 5 else - has_address=true - sed -i -e "s/SESSION_SERVER_IP/${ADDRESS}/g" ${CONFIG_PATH_FOR_INIT}/config.toml + has_port=true + sed -i -e "s/SESSION_SERVER_PORT/${SERVICEPORT}/g" ${CONFIG_PATH_FOR_INIT}/config.toml fi done + + {{- else if .Values.sessionServer.nodePort}} + sed -i -e "s/SESSION_SERVER_PORT/{{.Values.sessionServer.nodePort}}/g" ${CONFIG_PATH_FOR_INIT}/config.toml + {{- else }} - sed -i -e "s/SESSION_SERVER_IP/{{ .Values.sessionServer.publicIP }}/g" ${CONFIG_PATH_FOR_INIT}/config.toml - {{- end}} + sed -i -e "s/SESSION_SERVER_PORT/{{include "gitlab-runner.server-session-external-port" .}}/g" ${CONFIG_PATH_FOR_INIT}/config.toml + + {{- end }} + {{ end }} pre-entrypoint-script: | diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/deployment.yaml b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/deployment.yaml index 7a4328aaa..4bc7b08a8 100644 --- 
a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/deployment.yaml +++ b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/deployment.yaml @@ -79,20 +79,20 @@ spec: {{- include "gitlab-runner.runner-env-vars" . | indent 8 }} livenessProbe: exec: - command: ["/bin/bash", "/configmaps/check-live"] - initialDelaySeconds: 60 - timeoutSeconds: {{ default 3 .Values.probeTimeoutSeconds }} - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 + command: ["/bin/bash", "/configmaps/check-live", "{{ default 4 .Values.probeTimeoutSeconds | add -1 | max 1 }}"] + initialDelaySeconds: {{ default 60 .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ default 4 .Values.probeTimeoutSeconds }} + periodSeconds: {{ default 10 .Values.livenessProbe.periodSeconds }} + successThreshold: {{ default 1 .Values.livenessProbe.successThreshold }} + failureThreshold: {{ default 3 .Values.livenessProbe.failureThreshold }} readinessProbe: exec: command: ["/usr/bin/pgrep","gitlab.*runner"] - initialDelaySeconds: 10 - timeoutSeconds: {{ default 3 .Values.probeTimeoutSeconds }} - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 + initialDelaySeconds: {{ default 60 .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ default 4 .Values.probeTimeoutSeconds }} + periodSeconds: {{ default 10 .Values.readinessProbe.periodSeconds }} + successThreshold: {{ default 1 .Values.readinessProbe.successThreshold }} + failureThreshold: {{ default 3 .Values.readinessProbe.failureThreshold }} ports: - name: {{ .Values.metrics.portName | quote }} containerPort: {{ .Values.metrics.port }} diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/service-session-server.yaml b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/service-session-server.yaml index 7f9070110..8ecc1accd 100644 --- a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/service-session-server.yaml +++ 
b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/templates/service-session-server.yaml @@ -17,7 +17,8 @@ spec: selector: app: {{ include "gitlab-runner.fullname" . }} release: "{{ .Release.Name }}" - type: LoadBalancer + type: {{ (eq (include "gitlab-runner.server-session-service-type" .) "Headless") | ternary "ClusterIP" (include "gitlab-runner.server-session-service-type" .) }} + {{- if eq (include "gitlab-runner.server-session-service-type" .) "LoadBalancer"}} {{- if .Values.sessionServer.publicIP }} loadBalancerIP: {{ .Values.sessionServer.publicIP }} {{- end }} @@ -25,8 +26,15 @@ spec: loadBalancerSourceRanges: {{- toYaml .Values.sessionServer.loadBalancerSourceRanges | nindent 4 }} {{- end }} + {{- end }} + {{- if eq (include "gitlab-runner.server-session-service-type" .) "Headless" }} + clusterIP: None + {{- end }} ports: - protocol: TCP port: {{ include "gitlab-runner.server-session-external-port" . }} targetPort: {{ include "gitlab-runner.server-session-internal-port" . }} + {{- if .Values.sessionServer.nodePort }} + nodePort: {{ .Values.sessionServer.nodePort }} + {{- end }} {{- end }} diff --git a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/values.yaml b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/values.yaml index f68217ca1..8974632f0 100644 --- a/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/values.yaml +++ b/argocd-helm-charts/gitlab-runner/charts/gitlab-runner/values.yaml @@ -37,7 +37,21 @@ imagePullPolicy: IfNotPresent # - name: "image-pull-secret" ## Timeout, in seconds, for liveness and readiness probes of a runner pod. -# probeTimeoutSeconds: 3 +# probeTimeoutSeconds: 4 + +## Configure the livenessProbe +livenessProbe: {} +# initialDelaySeconds: 60 +# periodSeconds: 10 +# successThreshold: 1 +# failureThreshold: 3 + +## Configure the readinessProbe +readinessProbe: {} +# initialDelaySeconds: 60 +# periodSeconds: 10 +# successThreshold: 1 +# failureThreshold: 3 ## How many runner pods to launch. 
## @@ -143,10 +157,17 @@ sessionServer: # timeout: 1800 # internalPort: 8093 # externalPort: 9000 + + #In case sessionServer.serviceType is NodePort. If not defined, auto NodePort will be assigned. + # nodePort: 30093 + # publicIP: "" # loadBalancerSourceRanges: # - 1.2.3.4/32 + #Valid values: ClusterIP, Headless, NodePort, LoadBalancer + serviceType: LoadBalancer + ## For RBAC support: rbac: ## Specifies whether a Role and RoleBinding should be created @@ -622,12 +643,12 @@ deploymentLabels: {} # owner.team: ## Lifecycle options to be added to deployment -## +## deploymentLifecycle: {} # Example # preStop: # exec: - # command: ["/bin/sh", "-c", "echo 'shutting down'"] + # command: ["/bin/sh", "-c", "echo 'shutting down'"] ## Set hostname for runner pods #hostname: my-gitlab-runner @@ -722,4 +743,4 @@ extraObjects: [] # runner-token: "{{`{{ .runnerToken }}`}}" # dataFrom: # - extract: -# key: my-secret-store-secret \ No newline at end of file +# key: my-secret-store-secret From 307d4334ff333db8172f820c802767cd892d28f0 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:45:06 +0530 Subject: [PATCH 11/25] [CI] Helm Chart Update matomo --- CHANGELOG.md | 1 + argocd-helm-charts/matomo/Chart.lock | 6 +++--- argocd-helm-charts/matomo/Chart.yaml | 2 +- argocd-helm-charts/matomo/charts/matomo/Chart.lock | 8 ++++---- argocd-helm-charts/matomo/charts/matomo/Chart.yaml | 10 +++++----- argocd-helm-charts/matomo/charts/matomo/README.md | 7 ++++++- .../matomo/charts/matomo/charts/common/Chart.yaml | 4 ++-- .../matomo/charts/matomo/charts/common/README.md | 2 +- .../matomo/charts/common/templates/_resources.tpl | 14 +++++++------- .../matomo/charts/common/templates/_storage.tpl | 7 +++---- .../matomo/charts/matomo/charts/mariadb/Chart.lock | 6 +++--- .../matomo/charts/matomo/charts/mariadb/Chart.yaml | 10 +++++----- .../matomo/charts/matomo/charts/mariadb/README.md | 3 ++- .../matomo/charts/mariadb/charts/common/Chart.yaml | 4 ++-- 
.../matomo/charts/mariadb/charts/common/README.md | 2 +- .../mariadb/charts/common/templates/_resources.tpl | 14 +++++++------- .../mariadb/charts/common/templates/_storage.tpl | 7 +++---- .../charts/matomo/charts/mariadb/values.yaml | 10 ++++++---- .../matomo/charts/matomo/templates/_helpers.tpl | 6 +----- .../matomo/charts/matomo/templates/cronjob.yaml | 4 ++-- .../matomo/charts/matomo/templates/deployment.yaml | 2 +- .../matomo/charts/matomo/values.yaml | 12 +++++++----- 22 files changed, 73 insertions(+), 68 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 32ce2b011..1f751c783 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,3 +14,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: external-dns from version 8.1.0 to 8.3.3 - Updated: fluent-bit from version 0.46.11 to 0.47.5 - Updated: gitlab-runner from version 0.66.0 to 0.67.1 +- Updated: matomo from version 7.3.7 to 8.0.5 diff --git a/argocd-helm-charts/matomo/Chart.lock b/argocd-helm-charts/matomo/Chart.lock index 1b4b892e4..f88dc2a82 100644 --- a/argocd-helm-charts/matomo/Chart.lock +++ b/argocd-helm-charts/matomo/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: matomo repository: https://charts.bitnami.com/bitnami - version: 7.3.7 -digest: sha256:1ae4a3b84c9d6c87ff79ecfaa0fa819701e69d357fa191b851163317de04861a -generated: "2024-07-09T02:55:44.947675698+05:30" + version: 8.0.5 +digest: sha256:e848b96404c97beeab2aea1cd23ca4da06c2f2b2804f569adb0ad4708dd446c3 +generated: "2024-07-31T20:44:56.109619453+05:30" diff --git a/argocd-helm-charts/matomo/Chart.yaml b/argocd-helm-charts/matomo/Chart.yaml index d64bb4aad..84ceb6661 100644 --- a/argocd-helm-charts/matomo/Chart.yaml +++ b/argocd-helm-charts/matomo/Chart.yaml @@ -3,5 +3,5 @@ name: matomo version: 2.1.0 dependencies: - name: matomo - version: 7.3.7 + version: 8.0.5 repository: https://charts.bitnami.com/bitnami diff --git a/argocd-helm-charts/matomo/charts/matomo/Chart.lock 
b/argocd-helm-charts/matomo/charts/matomo/Chart.lock index 4d7ab948c..fde47ac1b 100644 --- a/argocd-helm-charts/matomo/charts/matomo/Chart.lock +++ b/argocd-helm-charts/matomo/charts/matomo/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: mariadb repository: oci://registry-1.docker.io/bitnamicharts - version: 18.2.6 + version: 19.0.3 - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.20.3 -digest: sha256:45fa124844332106477d92c0d7d13a56ee154d7ae4021e7c80ddc27ce2d39eee -generated: "2024-07-03T07:19:46.677718426Z" + version: 2.20.5 +digest: sha256:bdb3bb68e405ddb99a5ca3861b3600439252bd1fa7e47301807c0cb13be89fc2 +generated: "2024-07-25T08:56:54.795024752Z" diff --git a/argocd-helm-charts/matomo/charts/matomo/Chart.yaml b/argocd-helm-charts/matomo/charts/matomo/Chart.yaml index 53ab94885..02ef61a6d 100644 --- a/argocd-helm-charts/matomo/charts/matomo/Chart.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/Chart.yaml @@ -2,11 +2,11 @@ annotations: category: Analytics images: | - name: apache-exporter - image: docker.io/bitnami/apache-exporter:1.0.8-debian-12-r2 + image: docker.io/bitnami/apache-exporter:1.0.8-debian-12-r6 - name: matomo - image: docker.io/bitnami/matomo:5.1.0-debian-12-r1 + image: docker.io/bitnami/matomo:5.1.0-debian-12-r6 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r24 + image: docker.io/bitnami/os-shell:12-debian-12-r26 licenses: Apache-2.0 apiVersion: v2 appVersion: 5.1.0 @@ -14,7 +14,7 @@ dependencies: - condition: mariadb.enabled name: mariadb repository: oci://registry-1.docker.io/bitnamicharts - version: 18.x.x + version: 19.x.x - name: common repository: oci://registry-1.docker.io/bitnamicharts tags: @@ -37,4 +37,4 @@ maintainers: name: matomo sources: - https://github.com/bitnami/charts/tree/main/bitnami/matomo -version: 7.3.7 +version: 8.0.5 diff --git a/argocd-helm-charts/matomo/charts/matomo/README.md b/argocd-helm-charts/matomo/charts/matomo/README.md index d84bf5ff1..bea449a77 100644 
--- a/argocd-helm-charts/matomo/charts/matomo/README.md +++ b/argocd-helm-charts/matomo/charts/matomo/README.md @@ -131,7 +131,8 @@ helm install my-release --set persistence.existingClaim=PVC_NAME oci://REGISTRY_ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | | `global.imageRegistry` | Global Docker image registry | `""` | | `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | | `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | ### Common parameters @@ -466,6 +467,10 @@ Find more information about how to deal with common errors related to Bitnami's ## Upgrading +### To 8.0.0 + +This major release bumps the MariaDB version to 11.4. Follow the [upstream instructions](https://mariadb.com/kb/en/upgrading-from-mariadb-11-3-to-mariadb-11-4/) for upgrading from MariaDB 11.3 to 11.4. No major issues are expected during the upgrade. 
+ ### To 7.0.0 This major bump changes the following security defaults: diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/common/Chart.yaml b/argocd-helm-charts/matomo/charts/matomo/charts/common/Chart.yaml index 23ba4e4e7..dabd80681 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/common/Chart.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.20.3 +appVersion: 2.20.5 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts/tree/main/bitnami/common type: library -version: 2.20.3 +version: 2.20.5 diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/common/README.md b/argocd-helm-charts/matomo/charts/matomo/charts/common/README.md index 82d78a384..fee26c991 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/common/README.md +++ b/argocd-helm-charts/matomo/charts/matomo/charts/common/README.md @@ -24,7 +24,7 @@ data: myvalue: "Hello World" ``` -Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. 
## Introduction diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_resources.tpl b/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_resources.tpl index b4491f782..d8a43e1c2 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_resources.tpl +++ b/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_resources.tpl @@ -15,31 +15,31 @@ These presets are for basic testing and not meant to be used in production {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") ) "micro" (dict "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") ) "small" (dict "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") ) "medium" (dict "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") ) "large" (dict "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") ) "xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") ) "2xlarge" (dict "requests" (dict 
"cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") ) }} {{- if hasKey $presets .type -}} diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_storage.tpl b/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_storage.tpl index 7780da18b..aa75856c0 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_storage.tpl +++ b/argocd-helm-charts/matomo/charts/matomo/charts/common/templates/_storage.tpl @@ -4,19 +4,18 @@ SPDX-License-Identifier: APACHE-2.0 */}} {{/* vim: set filetype=mustache: */}} + {{/* Return the proper Storage Class {{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} */}} {{- define "common.storage.class" -}} - -{{- $storageClass := default .persistence.storageClass ((.global).storageClass) -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}} {{- if $storageClass -}} {{- if (eq "-" $storageClass) -}} {{- printf "storageClassName: \"\"" -}} - {{- else }} + {{- else -}} {{- printf "storageClassName: %s" $storageClass -}} {{- end -}} {{- end -}} - {{- end -}} diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.lock b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.lock index d2b5309dc..6a6e6d250 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.lock +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.20.3 -digest: sha256:569e1c9d81abdcad3891e065c0f23c83786527d2043f2bc68193c43d18886c19 -generated: "2024-06-18T11:49:11.933701908Z" + version: 2.20.5 +digest: 
sha256:5b98791747a148b9d4956b81bb8635f49a0ae831869d700d52e514b8fd1a2445 +generated: "2024-07-16T12:11:29.315343+02:00" diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.yaml b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.yaml index 038aa88be..6c94e7d39 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/Chart.yaml @@ -2,14 +2,14 @@ annotations: category: Database images: | - name: mariadb - image: docker.io/bitnami/mariadb:11.3.2-debian-12-r9 + image: docker.io/bitnami/mariadb:11.4.2-debian-12-r2 - name: mysqld-exporter - image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r25 + image: docker.io/bitnami/mysqld-exporter:0.15.1-debian-12-r29 - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r24 + image: docker.io/bitnami/os-shell:12-debian-12-r26 licenses: Apache-2.0 apiVersion: v2 -appVersion: 11.3.2 +appVersion: 11.4.2 dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts @@ -33,4 +33,4 @@ maintainers: name: mariadb sources: - https://github.com/bitnami/charts/tree/main/bitnami/mariadb -version: 18.2.6 +version: 19.0.3 diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/README.md b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/README.md index d26dd56b2..9e4ae8aaf 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/README.md +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/README.md @@ -148,7 +148,8 @@ As an alternative, this chart supports using an initContainer to change the owne | ----------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | | `global.imageRegistry` | Global Docker Image registry | `""` | | `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global storage class for dynamic provisioning | `""` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | | `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | ### Common parameters diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/Chart.yaml b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/Chart.yaml index 23ba4e4e7..dabd80681 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/Chart.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.20.3 +appVersion: 2.20.5 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts/tree/main/bitnami/common type: library -version: 2.20.3 +version: 2.20.5 diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/README.md b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/README.md index 82d78a384..fee26c991 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/README.md +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/README.md @@ -24,7 +24,7 @@ data: myvalue: "Hello World" ``` -Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. ## Introduction diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_resources.tpl b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_resources.tpl index b4491f782..d8a43e1c2 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_resources.tpl +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_resources.tpl @@ -15,31 +15,31 @@ These presets are for basic testing and not meant to be used in production {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") ) "micro" (dict "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") ) 
"small" (dict "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") ) "medium" (dict "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") ) "large" (dict "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") ) "xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") ) "2xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") ) }} {{- if hasKey $presets .type -}} diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_storage.tpl b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_storage.tpl index 7780da18b..aa75856c0 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_storage.tpl +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/charts/common/templates/_storage.tpl @@ -4,19 +4,18 @@ SPDX-License-Identifier: APACHE-2.0 */}} {{/* vim: set filetype=mustache: */}} + {{/* Return the proper Storage Class {{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} */}} {{- define "common.storage.class" -}} - -{{- $storageClass := default 
.persistence.storageClass ((.global).storageClass) -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}} {{- if $storageClass -}} {{- if (eq "-" $storageClass) -}} {{- printf "storageClassName: \"\"" -}} - {{- else }} + {{- else -}} {{- printf "storageClassName: %s" $storageClass -}} {{- end -}} {{- end -}} - {{- end -}} diff --git a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/values.yaml b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/values.yaml index e73ca3aa8..ae8747fdb 100644 --- a/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/values.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/charts/mariadb/values.yaml @@ -9,7 +9,8 @@ ## @param global.imageRegistry Global Docker Image registry ## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global storage class for dynamic provisioning +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead ## global: imageRegistry: "" @@ -18,6 +19,7 @@ global: ## - myRegistryKeySecretName ## imagePullSecrets: [] + defaultStorageClass: "" storageClass: "" ## Compatibility adaptations for Kubernetes platforms ## @@ -95,7 +97,7 @@ serviceBindings: image: registry: docker.io repository: bitnami/mariadb - tag: 11.3.2-debian-12-r9 + tag: 11.4.2-debian-12-r2 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -1053,7 +1055,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r24 + tag: 12-debian-12-r26 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) @@ -1097,7 +1099,7 @@ metrics: image: registry: docker.io repository: bitnami/mysqld-exporter - 
tag: 0.15.1-debian-12-r25 + tag: 0.15.1-debian-12-r29 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) diff --git a/argocd-helm-charts/matomo/charts/matomo/templates/_helpers.tpl b/argocd-helm-charts/matomo/charts/matomo/templates/_helpers.tpl index 7934d7b47..e6848b66a 100644 --- a/argocd-helm-charts/matomo/charts/matomo/templates/_helpers.tpl +++ b/argocd-helm-charts/matomo/charts/matomo/templates/_helpers.tpl @@ -172,10 +172,6 @@ Return the matomo pods needed initContainers - name: certificates image: {{ template "certificates.image" . }} imagePullPolicy: {{ default .Values.image.pullPolicy .Values.certificates.image.pullPolicy }} - imagePullSecrets: - {{- range (default .Values.image.pullSecrets .Values.certificates.image.pullSecrets) }} - - name: {{ . }} - {{- end }} securityContext: runAsUser: 0 {{- if .Values.certificates.command }} @@ -190,7 +186,7 @@ Return the matomo pods needed initContainers - sh - -c - install_packages ca-certificates openssl - && openssl req -new -x509 -days 3650 -nodes -sha256 + && openssl req -new -x509 -days 3650 -nodes -sha256 -subj "/CN=$(hostname)" -addext "subjectAltName = DNS:$(hostname)" -out /etc/ssl/certs/ssl-cert-snakeoil.pem -keyout /etc/ssl/private/ssl-cert-snakeoil.key -extensions v3_req diff --git a/argocd-helm-charts/matomo/charts/matomo/templates/cronjob.yaml b/argocd-helm-charts/matomo/charts/matomo/templates/cronjob.yaml index 2e8e094b9..42bc7bc57 100644 --- a/argocd-helm-charts/matomo/charts/matomo/templates/cronjob.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/templates/cronjob.yaml @@ -45,7 +45,7 @@ spec: tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.cronjobs.archive.tolerations "context" $) | nindent 12 }} {{- end }} initContainers: - {{ include "matomo.initContainers" . | nindent 12 }} + {{- include "matomo.initContainers" . 
| nindent 12 }} {{- if .Values.cronjobs.archive.podSecurityContext.enabled }} securityContext: {{- omit .Values.cronjobs.archive.podSecurityContext "enabled" | toYaml | nindent 16 }} {{- end }} @@ -217,7 +217,7 @@ spec: {{- end }} restartPolicy: OnFailure initContainers: - {{ include "matomo.initContainers" . | nindent 12 }} + {{- include "matomo.initContainers" . | nindent 12 }} {{- if .Values.cronjobs.taskScheduler.podSecurityContext.enabled }} securityContext: {{- omit .Values.cronjobs.taskScheduler.podSecurityContext "enabled" | toYaml | nindent 16 }} {{- end }} diff --git a/argocd-helm-charts/matomo/charts/matomo/templates/deployment.yaml b/argocd-helm-charts/matomo/charts/matomo/templates/deployment.yaml index 4b95f33d4..b5acae8d8 100644 --- a/argocd-helm-charts/matomo/charts/matomo/templates/deployment.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/templates/deployment.yaml @@ -71,7 +71,7 @@ spec: {{- if .Values.initContainers }} {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} {{- end }} - {{ include "matomo.initContainers" . | nindent 8 }} + {{- include "matomo.initContainers" . | nindent 8 }} containers: - name: {{ include "common.names.fullname" . }} image: {{ template "matomo.image" . 
}} diff --git a/argocd-helm-charts/matomo/charts/matomo/values.yaml b/argocd-helm-charts/matomo/charts/matomo/values.yaml index dec9fd41f..bb6fc3d20 100644 --- a/argocd-helm-charts/matomo/charts/matomo/values.yaml +++ b/argocd-helm-charts/matomo/charts/matomo/values.yaml @@ -9,7 +9,8 @@ ## @param global.imageRegistry Global Docker image registry ## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead ## global: imageRegistry: "" @@ -18,6 +19,7 @@ global: ## - myRegistryKeySecretName ## imagePullSecrets: [] + defaultStorageClass: "" storageClass: "" ## Compatibility adaptations for Kubernetes platforms ## @@ -68,7 +70,7 @@ extraDeploy: [] image: registry: docker.io repository: bitnami/matomo - tag: 5.1.0-debian-12-r1 + tag: 5.1.0-debian-12-r6 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -745,7 +747,7 @@ volumePermissions: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r24 + tag: 12-debian-12-r26 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -796,7 +798,7 @@ metrics: image: registry: docker.io repository: bitnami/apache-exporter - tag: 1.0.8-debian-12-r2 + tag: 1.0.8-debian-12-r6 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. 
@@ -878,7 +880,7 @@ certificates: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r24 + tag: 12-debian-12-r26 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' From abfb9a9fb30169b420b7dfac1346c82468f19606 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:45:42 +0530 Subject: [PATCH 12/25] [CI] Helm Chart Update mattermost-team-edition --- CHANGELOG.md | 1 + argocd-helm-charts/mattermost-team-edition/Chart.yaml | 2 +- .../charts/mattermost-team-edition/Chart.lock | 2 +- .../charts/mattermost-team-edition/Chart.yaml | 4 ++-- .../charts/mattermost-team-edition/values.yaml | 2 +- .../mattermost-team-edition/requirements.lock | 6 +++--- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f751c783..4a3e8c0ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,3 +15,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: fluent-bit from version 0.46.11 to 0.47.5 - Updated: gitlab-runner from version 0.66.0 to 0.67.1 - Updated: matomo from version 7.3.7 to 8.0.5 +- Updated: mattermost-team-edition from version 6.6.58 to 6.6.60 diff --git a/argocd-helm-charts/mattermost-team-edition/Chart.yaml b/argocd-helm-charts/mattermost-team-edition/Chart.yaml index 70828e951..bc0d5c2a9 100644 --- a/argocd-helm-charts/mattermost-team-edition/Chart.yaml +++ b/argocd-helm-charts/mattermost-team-edition/Chart.yaml @@ -3,5 +3,5 @@ name: mattermost-team-edition version: 6.6.6 dependencies: - name: mattermost-team-edition - version: 6.6.58 + version: 6.6.60 repository: https://helm.mattermost.com diff --git a/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.lock b/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.lock index 5403769ff..495ec39e3 100644 --- 
a/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.lock +++ b/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.lock @@ -3,4 +3,4 @@ dependencies: repository: https://charts.helm.sh/stable version: 1.6.4 digest: sha256:01345d74dd8069b6c142c7c52e67ea41ede74613386426f7217e14efcc65a1b3 -generated: "2024-07-02T14:18:10.732436145Z" +generated: "2024-07-22T15:15:24.153886224Z" diff --git a/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.yaml b/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.yaml index 824622fd6..37fef9d85 100644 --- a/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.yaml +++ b/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 9.9.1 +appVersion: 9.10.1 dependencies: - condition: mysql.enabled name: mysql @@ -17,4 +17,4 @@ sources: - https://github.com/mattermost/mattermost-server - https://github.com/mattermost/mattermost-helm type: application -version: 6.6.58 +version: 6.6.60 diff --git a/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/values.yaml b/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/values.yaml index 4a65704b5..51a1a64cb 100644 --- a/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/values.yaml +++ b/argocd-helm-charts/mattermost-team-edition/charts/mattermost-team-edition/values.yaml @@ -3,7 +3,7 @@ # Declare variables to be passed into your templates. 
image: repository: mattermost/mattermost-team-edition - tag: 9.9.1@sha256:0f68caa9843c517d0f00391f4857a2626e9f435c67eb3a8a655eeb28bd110742 + tag: 9.10.1@sha256:d2fafab2decfa6d0d6cf6796962c23563263e210b23b3c18ca3f81efc712a850 imagePullPolicy: IfNotPresent initContainerImage: diff --git a/argocd-helm-charts/mattermost-team-edition/requirements.lock b/argocd-helm-charts/mattermost-team-edition/requirements.lock index 22a6901e5..c798b14c3 100644 --- a/argocd-helm-charts/mattermost-team-edition/requirements.lock +++ b/argocd-helm-charts/mattermost-team-edition/requirements.lock @@ -1,6 +1,6 @@ dependencies: - name: mattermost-team-edition repository: https://helm.mattermost.com - version: 6.6.58 -digest: sha256:ea399403cc9e03be97797c42ca5b625f3c92764d23520b6807f9c23526f74490 -generated: "2024-07-09T02:55:40.105208795+05:30" + version: 6.6.60 +digest: sha256:50202583c69275e0bdff33bef3fe2e09cfa6494e1bb8f3915f4b7be9cc9d464a +generated: "2024-07-31T20:45:28.806702274+05:30" From 20422663a803a9afc6a0bc2ad096cb9c0d968dee Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:46:18 +0530 Subject: [PATCH 13/25] [CI] Helm Chart Update metallb --- CHANGELOG.md | 1 + argocd-helm-charts/metallb/Chart.lock | 6 +++--- argocd-helm-charts/metallb/Chart.yaml | 2 +- .../metallb/charts/metallb/Chart.lock | 6 +++--- .../metallb/charts/metallb/Chart.yaml | 2 +- .../charts/metallb/charts/common/Chart.yaml | 4 ++-- .../metallb/charts/metallb/charts/common/README.md | 2 +- .../metallb/charts/common/templates/_resources.tpl | 14 +++++++------- .../metallb/charts/common/templates/_storage.tpl | 7 +++---- .../metallb/templates/speaker/daemonset.yaml | 11 ----------- 10 files changed, 22 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a3e8c0ea..98a3b906f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,3 +16,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: gitlab-runner from version 0.66.0 to 0.67.1 - 
Updated: matomo from version 7.3.7 to 8.0.5 - Updated: mattermost-team-edition from version 6.6.58 to 6.6.60 +- Updated: metallb from version 6.3.7 to 6.3.9 diff --git a/argocd-helm-charts/metallb/Chart.lock b/argocd-helm-charts/metallb/Chart.lock index e90506425..35aa5aa36 100644 --- a/argocd-helm-charts/metallb/Chart.lock +++ b/argocd-helm-charts/metallb/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: metallb repository: https://charts.bitnami.com/bitnami - version: 6.3.7 -digest: sha256:33d7c9498d83f6eac67ba6674fe4ae3d40c845482e7448143f2544bb8e0ab20a -generated: "2024-07-09T02:56:15.767057154+05:30" + version: 6.3.9 +digest: sha256:dbe5173f0d599475d8bb434c8a51a697016e19859444e9780ef1e246eb754754 +generated: "2024-07-31T20:46:08.120064646+05:30" diff --git a/argocd-helm-charts/metallb/Chart.yaml b/argocd-helm-charts/metallb/Chart.yaml index 3e84f7898..ef2dc90d5 100644 --- a/argocd-helm-charts/metallb/Chart.yaml +++ b/argocd-helm-charts/metallb/Chart.yaml @@ -4,6 +4,6 @@ version: 0.11.0 # see latest chart here: https://github.com/bitnami/charts/tree/master/bitnami/metallb dependencies: - name: metallb - version: 6.3.7 + version: 6.3.9 repository: https://charts.bitnami.com/bitnami #repository: "oci://ghcr.io/Obmondo" diff --git a/argocd-helm-charts/metallb/charts/metallb/Chart.lock b/argocd-helm-charts/metallb/charts/metallb/Chart.lock index 2d4f0b25b..a9fe8f4e4 100644 --- a/argocd-helm-charts/metallb/charts/metallb/Chart.lock +++ b/argocd-helm-charts/metallb/charts/metallb/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.20.3 -digest: sha256:569e1c9d81abdcad3891e065c0f23c83786527d2043f2bc68193c43d18886c19 -generated: "2024-06-18T11:51:05.681419265Z" + version: 2.20.5 +digest: sha256:5b98791747a148b9d4956b81bb8635f49a0ae831869d700d52e514b8fd1a2445 +generated: "2024-07-16T12:12:33.253292+02:00" diff --git a/argocd-helm-charts/metallb/charts/metallb/Chart.yaml 
b/argocd-helm-charts/metallb/charts/metallb/Chart.yaml index dfefa3de4..2e4d5f6bd 100644 --- a/argocd-helm-charts/metallb/charts/metallb/Chart.yaml +++ b/argocd-helm-charts/metallb/charts/metallb/Chart.yaml @@ -33,4 +33,4 @@ maintainers: name: metallb sources: - https://github.com/bitnami/charts/tree/main/bitnami/metallb -version: 6.3.7 +version: 6.3.9 diff --git a/argocd-helm-charts/metallb/charts/metallb/charts/common/Chart.yaml b/argocd-helm-charts/metallb/charts/metallb/charts/common/Chart.yaml index 23ba4e4e7..dabd80681 100644 --- a/argocd-helm-charts/metallb/charts/metallb/charts/common/Chart.yaml +++ b/argocd-helm-charts/metallb/charts/metallb/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.20.3 +appVersion: 2.20.5 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts/tree/main/bitnami/common type: library -version: 2.20.3 +version: 2.20.5 diff --git a/argocd-helm-charts/metallb/charts/metallb/charts/common/README.md b/argocd-helm-charts/metallb/charts/metallb/charts/common/README.md index 82d78a384..fee26c991 100644 --- a/argocd-helm-charts/metallb/charts/metallb/charts/common/README.md +++ b/argocd-helm-charts/metallb/charts/metallb/charts/common/README.md @@ -24,7 +24,7 @@ data: myvalue: "Hello World" ``` -Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. 
## Introduction diff --git a/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_resources.tpl b/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_resources.tpl index b4491f782..d8a43e1c2 100644 --- a/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_resources.tpl +++ b/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_resources.tpl @@ -15,31 +15,31 @@ These presets are for basic testing and not meant to be used in production {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") ) "micro" (dict "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") ) "small" (dict "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") ) "medium" (dict "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") ) "large" (dict "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") ) "xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") ) "2xlarge" (dict "requests" 
(dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") ) }} {{- if hasKey $presets .type -}} diff --git a/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_storage.tpl b/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_storage.tpl index 7780da18b..aa75856c0 100644 --- a/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_storage.tpl +++ b/argocd-helm-charts/metallb/charts/metallb/charts/common/templates/_storage.tpl @@ -4,19 +4,18 @@ SPDX-License-Identifier: APACHE-2.0 */}} {{/* vim: set filetype=mustache: */}} + {{/* Return the proper Storage Class {{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} */}} {{- define "common.storage.class" -}} - -{{- $storageClass := default .persistence.storageClass ((.global).storageClass) -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}} {{- if $storageClass -}} {{- if (eq "-" $storageClass) -}} {{- printf "storageClassName: \"\"" -}} - {{- else }} + {{- else -}} {{- printf "storageClassName: %s" $storageClass -}} {{- end -}} {{- end -}} - {{- end -}} diff --git a/argocd-helm-charts/metallb/charts/metallb/templates/speaker/daemonset.yaml b/argocd-helm-charts/metallb/charts/metallb/templates/speaker/daemonset.yaml index f4c68edf4..79a07eb80 100644 --- a/argocd-helm-charts/metallb/charts/metallb/templates/speaker/daemonset.yaml +++ b/argocd-helm-charts/metallb/charts/metallb/templates/speaker/daemonset.yaml @@ -223,9 +223,6 @@ spec: {{- if .Values.speaker.frr.containerSecurityContext.enabled }} securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.speaker.frr.containerSecurityContext "context" $) | nindent 12 }} {{- end }} - {{- if 
.Values.speaker.frr.image.pullPolicy }} - imagePullPolicy: {{ .Values.speaker.frr.image.pullPolicy }} - {{- end }} volumeMounts: - name: frr-sockets mountPath: /var/run/frr @@ -251,11 +248,6 @@ spec: {{- else if ne .Values.speaker.frr.resourcesPreset "none" }} resources: {{- include "common.resources.preset" (dict "type" .Values.speaker.frr.resourcesPreset) | nindent 12 }} {{- end }} - {{- if .Values.speaker.frr.resources }} - resources: {{- toYaml .Values.speaker.frr.resources | nindent 12 }} - {{- else if ne .Values.speaker.frr.resourcesPreset "none" }} - resources: {{- include "common.resources.preset" (dict "type" .Values.speaker.frr.resourcesPreset) | nindent 12 }} - {{- end }} - name: reloader image: {{ include "common.images.image" (dict "imageRoot" .Values.speaker.frr.image "global" .Values.global) }} imagePullPolicy: {{ .Values.speaker.frr.image.pullPolicy }} @@ -282,9 +274,6 @@ spec: securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.speaker.frr.containerSecurityContext "context" $) | nindent 12 }} {{- end }} command: ["/etc/frr_metrics/frr-metrics"] - {{- if .Values.speaker.frr.containerSecurityContext.enabled }} - securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.speaker.frr.containerSecurityContext "context" $) | nindent 12 }} - {{- end }} args: - --metrics-port={{ .Values.speaker.frr.containerPorts.metrics }} ports: From affd2650a7b54b9756a9fbc09ff7f3c63bc4b16d Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:47:14 +0530 Subject: [PATCH 14/25] [CI] Helm Chart Update oncall --- CHANGELOG.md | 1 + argocd-helm-charts/oncall/Chart.lock | 6 +++--- argocd-helm-charts/oncall/Chart.yaml | 2 +- argocd-helm-charts/oncall/charts/oncall/Chart.lock | 2 +- argocd-helm-charts/oncall/charts/oncall/Chart.yaml | 4 ++-- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98a3b906f..63fb545e3 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,3 +17,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: matomo from version 7.3.7 to 8.0.5 - Updated: mattermost-team-edition from version 6.6.58 to 6.6.60 - Updated: metallb from version 6.3.7 to 6.3.9 +- Updated: oncall from version 1.8.1 to 1.8.8 diff --git a/argocd-helm-charts/oncall/Chart.lock b/argocd-helm-charts/oncall/Chart.lock index a2da23b0f..50f71a17d 100644 --- a/argocd-helm-charts/oncall/Chart.lock +++ b/argocd-helm-charts/oncall/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: oncall repository: https://grafana.github.io/helm-charts - version: 1.8.1 -digest: sha256:22c685b02788c59057132102f531674ae4f535cccd7334a5c6d1f554a8886882 -generated: "2024-07-09T02:56:15.357248751+05:30" + version: 1.8.8 +digest: sha256:73df6a61c3b4d4b3eb9073e0ec0cd4aaead8ab179b3c186f27c642239569340a +generated: "2024-07-31T20:46:59.370367416+05:30" diff --git a/argocd-helm-charts/oncall/Chart.yaml b/argocd-helm-charts/oncall/Chart.yaml index f87f927d0..7929e525b 100644 --- a/argocd-helm-charts/oncall/Chart.yaml +++ b/argocd-helm-charts/oncall/Chart.yaml @@ -3,5 +3,5 @@ name: oncall version: 1.0.0 dependencies: - name: oncall - version: 1.8.1 + version: 1.8.8 repository: https://grafana.github.io/helm-charts diff --git a/argocd-helm-charts/oncall/charts/oncall/Chart.lock b/argocd-helm-charts/oncall/charts/oncall/Chart.lock index e5a72c755..1fc037ee1 100644 --- a/argocd-helm-charts/oncall/charts/oncall/Chart.lock +++ b/argocd-helm-charts/oncall/charts/oncall/Chart.lock @@ -24,4 +24,4 @@ dependencies: repository: https://prometheus-community.github.io/helm-charts version: 25.8.2 digest: sha256:edc9fef449a694cd319135e37ac84f8247ac9ad0c48ac86099dae4e428beb7b7 -generated: "2024-07-08T16:40:03.711455938Z" +generated: "2024-07-25T13:49:59.763377514Z" diff --git a/argocd-helm-charts/oncall/charts/oncall/Chart.yaml b/argocd-helm-charts/oncall/charts/oncall/Chart.yaml index b2a4f604d..78c58012f 
100644 --- a/argocd-helm-charts/oncall/charts/oncall/Chart.yaml +++ b/argocd-helm-charts/oncall/charts/oncall/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v1.8.1 +appVersion: v1.8.8 dependencies: - condition: cert-manager.enabled name: cert-manager @@ -36,4 +36,4 @@ dependencies: description: Developer-friendly incident response with brilliant Slack integration name: oncall type: application -version: 1.8.1 +version: 1.8.8 From a62aa55e9b23ca1f2e89d4a8abb54f7998e19168 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:47:53 +0530 Subject: [PATCH 15/25] [CI] Helm Chart Update opencost --- CHANGELOG.md | 1 + argocd-helm-charts/opencost/Chart.lock | 6 ++--- argocd-helm-charts/opencost/Chart.yaml | 2 +- .../opencost/charts/opencost/Chart.yaml | 2 +- .../opencost/charts/opencost/README.md | 4 ++-- .../charts/opencost/templates/_helpers.tpl | 24 +++++++++++++++++++ .../charts/opencost/templates/deployment.yaml | 12 ++-------- .../opencost/charts/opencost/values.yaml | 7 +++--- 8 files changed, 38 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 63fb545e3..aa27caaca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,3 +18,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: mattermost-team-edition from version 6.6.58 to 6.6.60 - Updated: metallb from version 6.3.7 to 6.3.9 - Updated: oncall from version 1.8.1 to 1.8.8 +- Updated: opencost from version 1.40.0 to 1.41.0 diff --git a/argocd-helm-charts/opencost/Chart.lock b/argocd-helm-charts/opencost/Chart.lock index 133a05c1f..6611fa130 100644 --- a/argocd-helm-charts/opencost/Chart.lock +++ b/argocd-helm-charts/opencost/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: opencost repository: https://opencost.github.io/opencost-helm-chart - version: 1.40.0 -digest: sha256:4f91b2011785b90dd8452e8be154dcaeae1ae904a258970b34c607ae0232e221 -generated: "2024-07-09T02:56:48.708114067+05:30" + version: 1.41.0 +digest: 
sha256:c48a2d4b55a0b7f6bfb1583d1937ddb06a23fdf77343b0bc75fbc0bf1d04f0ce +generated: "2024-07-31T20:47:38.087285717+05:30" diff --git a/argocd-helm-charts/opencost/Chart.yaml b/argocd-helm-charts/opencost/Chart.yaml index 1a923770c..99430be6c 100644 --- a/argocd-helm-charts/opencost/Chart.yaml +++ b/argocd-helm-charts/opencost/Chart.yaml @@ -3,5 +3,5 @@ name: opencost version: 1.0.0 dependencies: - name: opencost - version: 1.40.0 + version: 1.41.0 repository: https://opencost.github.io/opencost-helm-chart diff --git a/argocd-helm-charts/opencost/charts/opencost/Chart.yaml b/argocd-helm-charts/opencost/charts/opencost/Chart.yaml index 58cdf3b89..e68536701 100644 --- a/argocd-helm-charts/opencost/charts/opencost/Chart.yaml +++ b/argocd-helm-charts/opencost/charts/opencost/Chart.yaml @@ -16,4 +16,4 @@ maintainers: name: brito-rafa name: opencost type: application -version: 1.40.0 +version: 1.41.0 diff --git a/argocd-helm-charts/opencost/charts/opencost/README.md b/argocd-helm-charts/opencost/charts/opencost/README.md index 984219b9d..69c7d4531 100644 --- a/argocd-helm-charts/opencost/charts/opencost/README.md +++ b/argocd-helm-charts/opencost/charts/opencost/README.md @@ -2,7 +2,7 @@ OpenCost and OpenCost UI -![Version: 1.40.0](https://img.shields.io/badge/Version-1.40.0-informational?style=flat-square) +![Version: 1.41.0](https://img.shields.io/badge/Version-1.41.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.111.0](https://img.shields.io/badge/AppVersion-1.111.0-informational?style=flat-square) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/opencost)](https://artifacthub.io/packages/search?repo=opencost) @@ -70,7 +70,7 @@ $ helm install opencost opencost/opencost | opencost.exporter.image.pullPolicy | string | `"IfNotPresent"` | Exporter container image pull policy | | opencost.exporter.image.registry | string | 
`"ghcr.io"` | Exporter container image registry | | opencost.exporter.image.repository | string | `"opencost/opencost"` | Exporter container image name | -| opencost.exporter.image.tag | string | `""` (use appVersion in Chart.yaml) | Exporter container image tag | +| opencost.exporter.image.tag | string | `"1.111.0@sha256:6aa68e52a24b14ba41f23db08d1b9db1429a1c0300f4c0381ecc2c61fc311a97"` | Exporter container image tag | | opencost.exporter.livenessProbe.enabled | bool | `true` | Whether probe is enabled | | opencost.exporter.livenessProbe.failureThreshold | int | `3` | Number of failures for probe to be considered failed | | opencost.exporter.livenessProbe.initialDelaySeconds | int | `10` | Number of seconds before probe is initiated | diff --git a/argocd-helm-charts/opencost/charts/opencost/templates/_helpers.tpl b/argocd-helm-charts/opencost/charts/opencost/templates/_helpers.tpl index 282fc0e75..ec38d3df4 100644 --- a/argocd-helm-charts/opencost/charts/opencost/templates/_helpers.tpl +++ b/argocd-helm-charts/opencost/charts/opencost/templates/_helpers.tpl @@ -156,3 +156,27 @@ apiVersion: networking.k8s.io/v1 apiVersion: networking.k8s.io/v1beta1 {{- end }} {{- end -}} + +{{- define "opencost.imageTag" -}} +{{ .Values.opencost.exporter.image.tag | default (printf "%s" .Chart.AppVersion) }} +{{- end -}} + +{{- define "opencost.fullImageName" -}} +{{- if .Values.opencost.exporter.image.fullImageName }} +{{- .Values.opencost.exporter.image.fullImageName -}} +{{- else}} +{{- .Values.opencost.exporter.image.registry -}}/{{- .Values.opencost.exporter.image.repository -}}:{{- include "opencost.imageTag" . 
-}} +{{- end -}} +{{- end -}} + +{{- define "opencostUi.imageTag" -}} +{{- .Values.opencost.ui.image.tag | default (printf "%s" .Chart.AppVersion) -}} +{{- end -}} + +{{- define "opencostUi.fullImageName" -}} +{{- if .Values.opencost.ui.image.fullImageName }} +{{- .Values.opencost.ui.image.fullImageName -}} +{{- else}} +{{- .Values.opencost.ui.image.registry -}}/{{- .Values.opencost.ui.image.repository -}}:{{- include "opencostUi.imageTag" . -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/argocd-helm-charts/opencost/charts/opencost/templates/deployment.yaml b/argocd-helm-charts/opencost/charts/opencost/templates/deployment.yaml index 7fbee2ddc..1d8e29126 100644 --- a/argocd-helm-charts/opencost/charts/opencost/templates/deployment.yaml +++ b/argocd-helm-charts/opencost/charts/opencost/templates/deployment.yaml @@ -62,11 +62,7 @@ spec: {{- end }} containers: - name: {{ include "opencost.fullname" . }} - {{- if .Values.opencost.exporter.image.fullImageName }} - image: {{ .Values.opencost.exporter.image.fullImageName }} - {{- else }} - image: "{{ .Values.opencost.exporter.image.registry }}/{{ .Values.opencost.exporter.image.repository }}:{{ .Values.opencost.exporter.image.tag | default (printf "%s" .Chart.AppVersion) }}" - {{- end}} + image: {{ include "opencost.fullImageName" . 
}} imagePullPolicy: {{ .Values.opencost.exporter.image.pullPolicy }} args: {{- range .Values.opencost.exporter.extraArgs }} @@ -268,11 +264,7 @@ spec: {{- end }} {{- if .Values.opencost.ui.enabled }} - name: opencost-ui - {{- if .Values.opencost.ui.image.fullImageName }} - image: {{ .Values.opencost.ui.image.fullImageName }} - {{- else}} - image: "{{ .Values.opencost.ui.image.registry }}/{{ .Values.opencost.ui.image.repository }}:{{ .Values.opencost.ui.image.tag | default (printf "%s" .Chart.AppVersion) }}" - {{- end }} + image: {{ include "opencostUi.fullImageName" .}} imagePullPolicy: {{ .Values.opencost.ui.image.pullPolicy }} ports: - containerPort: {{ .Values.opencost.ui.uiPort }} diff --git a/argocd-helm-charts/opencost/charts/opencost/values.yaml b/argocd-helm-charts/opencost/charts/opencost/values.yaml index 87a710019..ed1ce7768 100644 --- a/argocd-helm-charts/opencost/charts/opencost/values.yaml +++ b/argocd-helm-charts/opencost/charts/opencost/values.yaml @@ -130,8 +130,7 @@ opencost: # -- Exporter container image name repository: opencost/opencost # -- Exporter container image tag - # @default -- `""` (use appVersion in Chart.yaml) - tag: "" + tag: "1.111.0@sha256:6aa68e52a24b14ba41f23db08d1b9db1429a1c0300f4c0381ecc2c61fc311a97" # -- Exporter container image pull policy pullPolicy: IfNotPresent # -- Override the full image name for development purposes @@ -221,6 +220,8 @@ opencost: # -- Any extra environment variables you would like to pass on to the pod extraEnv: {} # FOO: BAR + # For example, if accessing mimir directly and getting 401 Unauthorized + # PROMETHEUS_HEADER_X_SCOPE_ORGID: anonymous customPricing: # -- Enables custom pricing configuration enabled: false @@ -374,7 +375,7 @@ opencost: repository: opencost/opencost-ui # -- UI container image tag # @default -- `""` (use appVersion in Chart.yaml) - tag: "" + tag: "1.111.0@sha256:f7221e7a708d71663f5eca6c238268757eb4352f3e9f46b1029d33ab4e53fd8a" # -- UI container image pull policy pullPolicy: 
IfNotPresent # -- Override the full image name for development purposes From b0ce6608e3fe4fd7301928fff2dd7dfc8590c804 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:48:38 +0530 Subject: [PATCH 16/25] [CI] Helm Chart Update opensearch-dashboards --- CHANGELOG.md | 1 + argocd-helm-charts/opensearch-dashboards/Chart.lock | 6 +++--- argocd-helm-charts/opensearch-dashboards/Chart.yaml | 2 +- .../charts/opensearch-dashboards/CHANGELOG.md | 12 +++++++++++- .../charts/opensearch-dashboards/Chart.yaml | 2 +- .../opensearch-dashboards/templates/deployment.yaml | 2 +- 6 files changed, 18 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa27caaca..24b583f81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,3 +19,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: metallb from version 6.3.7 to 6.3.9 - Updated: oncall from version 1.8.1 to 1.8.8 - Updated: opencost from version 1.40.0 to 1.41.0 +- Updated: opensearch-dashboards from version 2.19.0 to 2.19.1 diff --git a/argocd-helm-charts/opensearch-dashboards/Chart.lock b/argocd-helm-charts/opensearch-dashboards/Chart.lock index 3ed7aaf62..9a8baa304 100644 --- a/argocd-helm-charts/opensearch-dashboards/Chart.lock +++ b/argocd-helm-charts/opensearch-dashboards/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: opensearch-dashboards repository: https://opensearch-project.github.io/helm-charts/ - version: 2.19.0 -digest: sha256:97adfc936a24ec57483041395d101591566e648a43a4384d1a88b82cfee68435 -generated: "2024-07-09T02:56:28.736615962+05:30" + version: 2.19.1 +digest: sha256:73857b1ddec3a28b71ea8cde4241c7994e86567171fc09595cf4d8b71397b2b5 +generated: "2024-07-31T20:48:23.669300732+05:30" diff --git a/argocd-helm-charts/opensearch-dashboards/Chart.yaml b/argocd-helm-charts/opensearch-dashboards/Chart.yaml index 76932906e..032bfa1fc 100644 --- a/argocd-helm-charts/opensearch-dashboards/Chart.yaml +++ 
b/argocd-helm-charts/opensearch-dashboards/Chart.yaml @@ -5,5 +5,5 @@ appVersion: "1.0.0" version: 1.2.0 dependencies: - name: opensearch-dashboards - version: 2.19.0 + version: 2.19.1 repository: https://opensearch-project.github.io/helm-charts/ diff --git a/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/CHANGELOG.md b/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/CHANGELOG.md index 7240ae9d8..d8f781273 100644 --- a/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/CHANGELOG.md +++ b/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/CHANGELOG.md @@ -13,6 +13,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed ### Security --- +## [2.19.1] +### Added +### Changed +### Deprecated +### Removed +### Fixed +- Fixed pod topology spread constraints in Dashboards +### Security +--- ## [2.19.0] ### Added - Updated OpenSearch Dashboards appVersion to 2.15.0 @@ -340,7 +349,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed ### Security -[Unreleased]: https://github.com/opensearch-project/helm-charts/compare/opensearch-dashboards-2.19.0...HEAD +[Unreleased]: https://github.com/opensearch-project/helm-charts/compare/opensearch-dashboards-2.19.1...HEAD +[2.19.1]: https://github.com/opensearch-project/helm-charts/compare/opensearch-dashboards-2.19.0...opensearch-dashboards-2.19.1 [2.19.0]: https://github.com/opensearch-project/helm-charts/compare/opensearch-dashboards-2.18.0...opensearch-dashboards-2.19.0 [2.18.0]: https://github.com/opensearch-project/helm-charts/compare/opensearch-dashboards-2.17.0...opensearch-dashboards-2.18.0 [2.17.0]: https://github.com/opensearch-project/helm-charts/compare/opensearch-dashboards-2.16.0...opensearch-dashboards-2.17.0 diff --git a/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/Chart.yaml 
b/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/Chart.yaml index a2d3e7017..1c8bb7ce4 100644 --- a/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/Chart.yaml +++ b/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/Chart.yaml @@ -9,4 +9,4 @@ maintainers: - name: TheAlgo name: opensearch-dashboards type: application -version: 2.19.0 +version: 2.19.1 diff --git a/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/templates/deployment.yaml b/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/templates/deployment.yaml index f1d4916ab..256cf054d 100644 --- a/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/templates/deployment.yaml +++ b/argocd-helm-charts/opensearch-dashboards/charts/opensearch-dashboards/templates/deployment.yaml @@ -79,7 +79,7 @@ spec: {{- end }} {{- if .Values.topologySpreadConstraints }} topologySpreadConstraints: -{- toYaml .Values.topologySpreadConstraints | nindent 8 }} +{{- toYaml .Values.topologySpreadConstraints | nindent 8 }} {{- end }} {{- if .Values.imagePullSecrets }} imagePullSecrets: From 665927b97e352ec9b5037e7f28afe62f04707606 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:49:40 +0530 Subject: [PATCH 17/25] [CI] Helm Chart Update rabbitmq-operator --- CHANGELOG.md | 1 + argocd-helm-charts/rabbitmq-operator/Chart.lock | 6 +++--- argocd-helm-charts/rabbitmq-operator/Chart.yaml | 2 +- .../charts/rabbitmq-cluster-operator/Chart.lock | 6 +++--- .../charts/rabbitmq-cluster-operator/Chart.yaml | 10 +++++----- .../charts/rabbitmq-cluster-operator/README.md | 3 ++- .../charts/common/Chart.yaml | 4 ++-- .../charts/common/README.md | 2 +- .../charts/common/templates/_resources.tpl | 14 +++++++------- .../charts/common/templates/_storage.tpl | 7 +++---- .../charts/rabbitmq-cluster-operator/values.yaml | 12 +++++++----- 11 files changed, 35 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 24b583f81..cb52d4f4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,3 +20,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: oncall from version 1.8.1 to 1.8.8 - Updated: opencost from version 1.40.0 to 1.41.0 - Updated: opensearch-dashboards from version 2.19.0 to 2.19.1 +- Updated: rabbitmq-cluster-operator from version 4.3.13 to 4.3.16 diff --git a/argocd-helm-charts/rabbitmq-operator/Chart.lock b/argocd-helm-charts/rabbitmq-operator/Chart.lock index 9cc2d1feb..c6e417af1 100644 --- a/argocd-helm-charts/rabbitmq-operator/Chart.lock +++ b/argocd-helm-charts/rabbitmq-operator/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: rabbitmq-cluster-operator repository: https://charts.bitnami.com/bitnami - version: 4.3.13 -digest: sha256:7812503ca0b20e4be21ee2cb4c3f576be3ec4f52d326d0d1fb926672ad8ce3d4 -generated: "2024-07-09T02:57:20.434255636+05:30" + version: 4.3.16 +digest: sha256:5165fa1ac289850eb20e03c8330e0d6d6b599583fe89aafc275468e2b1a19b86 +generated: "2024-07-31T20:49:30.003870078+05:30" diff --git a/argocd-helm-charts/rabbitmq-operator/Chart.yaml b/argocd-helm-charts/rabbitmq-operator/Chart.yaml index 30fbebcfc..20489f438 100644 --- a/argocd-helm-charts/rabbitmq-operator/Chart.yaml +++ b/argocd-helm-charts/rabbitmq-operator/Chart.yaml @@ -3,5 +3,5 @@ name: rabbitmq-operator version: 1.0.0 dependencies: - name: rabbitmq-cluster-operator - version: 4.3.13 + version: 4.3.16 repository: https://charts.bitnami.com/bitnami diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.lock b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.lock index d33168b28..6caefbae0 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.lock +++ b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - 
version: 2.20.3 -digest: sha256:569e1c9d81abdcad3891e065c0f23c83786527d2043f2bc68193c43d18886c19 -generated: "2024-06-18T12:06:05.935964655Z" + version: 2.20.5 +digest: sha256:5b98791747a148b9d4956b81bb8635f49a0ae831869d700d52e514b8fd1a2445 +generated: "2024-07-16T12:17:30.845825+02:00" diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.yaml b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.yaml index 389b8b1be..07d7645f2 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.yaml +++ b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/Chart.yaml @@ -2,13 +2,13 @@ annotations: category: Infrastructure images: | - name: rabbitmq - image: docker.io/bitnami/rabbitmq:3.13.4-debian-12-r0 + image: docker.io/bitnami/rabbitmq:3.13.6-debian-12-r0 - name: rabbitmq-cluster-operator - image: docker.io/bitnami/rabbitmq-cluster-operator:2.9.0-debian-12-r4 + image: docker.io/bitnami/rabbitmq-cluster-operator:2.9.0-debian-12-r6 - name: rmq-default-credential-updater - image: docker.io/bitnami/rmq-default-credential-updater:1.0.4-debian-12-r22 + image: docker.io/bitnami/rmq-default-credential-updater:1.0.4-debian-12-r24 - name: rmq-messaging-topology-operator - image: docker.io/bitnami/rmq-messaging-topology-operator:1.14.2-debian-12-r1 + image: docker.io/bitnami/rmq-messaging-topology-operator:1.14.2-debian-12-r3 licenses: Apache-2.0 apiVersion: v2 appVersion: 2.9.0 @@ -35,4 +35,4 @@ maintainers: name: rabbitmq-cluster-operator sources: - https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq-cluster-operator -version: 4.3.13 +version: 4.3.16 diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/README.md b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/README.md index 8dc1dbcac..38b231326 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/README.md +++ 
b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/README.md @@ -222,7 +222,8 @@ extraDeploy: | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | | `global.imageRegistry` | Global Docker image registry | `""` | | `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | | `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | ### Common parameters diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/Chart.yaml b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/Chart.yaml index 23ba4e4e7..dabd80681 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/Chart.yaml +++ b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.20.3 +appVersion: 2.20.5 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts/tree/main/bitnami/common type: library -version: 2.20.3 +version: 2.20.5 diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/README.md b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/README.md index 82d78a384..fee26c991 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/README.md +++ b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/README.md @@ -24,7 +24,7 @@ data: myvalue: "Hello World" ``` -Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. 
## Introduction diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_resources.tpl b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_resources.tpl index b4491f782..d8a43e1c2 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_resources.tpl +++ b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_resources.tpl @@ -15,31 +15,31 @@ These presets are for basic testing and not meant to be used in production {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") ) "micro" (dict "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") ) "small" (dict "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") ) "medium" (dict "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") ) "large" (dict "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") ) "xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" 
"1024Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") ) "2xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") ) }} {{- if hasKey $presets .type -}} diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_storage.tpl b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_storage.tpl index 7780da18b..aa75856c0 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_storage.tpl +++ b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/charts/common/templates/_storage.tpl @@ -4,19 +4,18 @@ SPDX-License-Identifier: APACHE-2.0 */}} {{/* vim: set filetype=mustache: */}} + {{/* Return the proper Storage Class {{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} */}} {{- define "common.storage.class" -}} - -{{- $storageClass := default .persistence.storageClass ((.global).storageClass) -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default (.global).defaultStorageClass | default "" -}} {{- if $storageClass -}} {{- if (eq "-" $storageClass) -}} {{- printf "storageClassName: \"\"" -}} - {{- else }} + {{- else -}} {{- printf "storageClassName: %s" $storageClass -}} {{- end -}} {{- end -}} - {{- end -}} diff --git a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/values.yaml b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/values.yaml index 189ac252e..613755959 100644 --- a/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/values.yaml +++ b/argocd-helm-charts/rabbitmq-operator/charts/rabbitmq-cluster-operator/values.yaml @@ -9,7 +9,8 @@ ## @param 
global.imageRegistry Global Docker image registry ## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead ## global: imageRegistry: "" @@ -18,6 +19,7 @@ global: ## - myRegistryKeySecretName ## imagePullSecrets: [] + defaultStorageClass: "" storageClass: "" ## Compatibility adaptations for Kubernetes platforms ## @@ -72,7 +74,7 @@ diagnosticMode: rabbitmqImage: registry: docker.io repository: bitnami/rabbitmq - tag: 3.13.4-debian-12-r0 + tag: 3.13.6-debian-12-r0 digest: "" ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. @@ -93,7 +95,7 @@ rabbitmqImage: credentialUpdaterImage: registry: docker.io repository: bitnami/rmq-default-credential-updater - tag: 1.0.4-debian-12-r22 + tag: 1.0.4-debian-12-r24 digest: "" ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
@@ -116,7 +118,7 @@ clusterOperator: image: registry: docker.io repository: bitnami/rabbitmq-cluster-operator - tag: 2.9.0-debian-12-r4 + tag: 2.9.0-debian-12-r6 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -649,7 +651,7 @@ msgTopologyOperator: image: registry: docker.io repository: bitnami/rmq-messaging-topology-operator - tag: 1.14.2-debian-12-r1 + tag: 1.14.2-debian-12-r3 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' From be96c5bf196beb0ee4b81636aab4fe8770122e43 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:50:17 +0530 Subject: [PATCH 18/25] [CI] Helm Chart Update redmine --- CHANGELOG.md | 1 + argocd-helm-charts/redmine/Chart.lock | 6 +++--- argocd-helm-charts/redmine/Chart.yaml | 2 +- .../redmine/charts/redmine/Chart.lock | 10 +++++----- .../redmine/charts/redmine/Chart.yaml | 8 ++++---- .../redmine/charts/redmine/README.md | 7 ++++++- .../charts/redmine/charts/common/Chart.yaml | 4 ++-- .../redmine/charts/redmine/charts/common/README.md | 2 +- .../redmine/charts/common/templates/_resources.tpl | 14 +++++++------- .../redmine/charts/common/templates/_storage.tpl | 7 +++---- .../redmine/charts/redmine/values.yaml | 8 +++++--- 11 files changed, 38 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb52d4f4e..ec4fac196 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,3 +21,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: opencost from version 1.40.0 to 1.41.0 - Updated: opensearch-dashboards from version 2.19.0 to 2.19.1 - Updated: rabbitmq-cluster-operator from version 4.3.13 to 4.3.16 +- Updated: redmine from version 28.2.7 to 29.0.3 diff --git a/argocd-helm-charts/redmine/Chart.lock b/argocd-helm-charts/redmine/Chart.lock index dc9355d0d..fea3759d7 100644 --- a/argocd-helm-charts/redmine/Chart.lock +++ 
b/argocd-helm-charts/redmine/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: redmine repository: https://charts.bitnami.com/bitnami - version: 28.2.7 -digest: sha256:f46b1c5b03130bba373568471141851b91f85a77bceec38c4741f9052dc8a03c -generated: "2024-07-09T02:57:36.315519544+05:30" + version: 29.0.3 +digest: sha256:2a8c04b06632f641a00351c9061f53ab7d044b10c37a71521d48419ed885ee31 +generated: "2024-07-31T20:50:07.212240334+05:30" diff --git a/argocd-helm-charts/redmine/Chart.yaml b/argocd-helm-charts/redmine/Chart.yaml index 5265771ee..c53c88847 100644 --- a/argocd-helm-charts/redmine/Chart.yaml +++ b/argocd-helm-charts/redmine/Chart.yaml @@ -3,5 +3,5 @@ name: redmine version: 1.0.0 dependencies: - name: redmine - version: 28.2.7 + version: 29.0.3 repository: https://charts.bitnami.com/bitnami diff --git a/argocd-helm-charts/redmine/charts/redmine/Chart.lock b/argocd-helm-charts/redmine/charts/redmine/Chart.lock index 4e3426e95..a47fc59b6 100644 --- a/argocd-helm-charts/redmine/charts/redmine/Chart.lock +++ b/argocd-helm-charts/redmine/charts/redmine/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: postgresql repository: oci://registry-1.docker.io/bitnamicharts - version: 15.5.12 + version: 15.5.19 - name: mariadb repository: oci://registry-1.docker.io/bitnamicharts - version: 18.2.6 + version: 19.0.3 - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.20.3 -digest: sha256:ab08aa748d6e77ec8b1967c6675539418d571923f35c80252df1da3c19a41034 -generated: "2024-07-03T07:21:56.504617398Z" + version: 2.20.5 +digest: sha256:8412453fcd2f2052a9742385b9e9291c60d1954667f167f5432b779e66924cfb +generated: "2024-07-25T08:57:09.769517458Z" diff --git a/argocd-helm-charts/redmine/charts/redmine/Chart.yaml b/argocd-helm-charts/redmine/charts/redmine/Chart.yaml index bb26297e4..d2f35a3f0 100644 --- a/argocd-helm-charts/redmine/charts/redmine/Chart.yaml +++ b/argocd-helm-charts/redmine/charts/redmine/Chart.yaml @@ -2,9 +2,9 @@ annotations: category: 
ProjectManagement images: | - name: os-shell - image: docker.io/bitnami/os-shell:12-debian-12-r24 + image: docker.io/bitnami/os-shell:12-debian-12-r26 - name: redmine - image: docker.io/bitnami/redmine:5.1.3-debian-12-r1 + image: docker.io/bitnami/redmine:5.1.3-debian-12-r5 licenses: Apache-2.0 apiVersion: v2 appVersion: 5.1.3 @@ -16,7 +16,7 @@ dependencies: - condition: mariadb.enabled name: mariadb repository: oci://registry-1.docker.io/bitnamicharts - version: 18.x.x + version: 19.x.x - name: common repository: oci://registry-1.docker.io/bitnamicharts tags: @@ -42,4 +42,4 @@ maintainers: name: redmine sources: - https://github.com/bitnami/charts/tree/main/bitnami/redmine -version: 28.2.7 +version: 29.0.3 diff --git a/argocd-helm-charts/redmine/charts/redmine/README.md b/argocd-helm-charts/redmine/charts/redmine/README.md index 8f3be6f50..1c955d768 100644 --- a/argocd-helm-charts/redmine/charts/redmine/README.md +++ b/argocd-helm-charts/redmine/charts/redmine/README.md @@ -238,7 +238,8 @@ helm install test --set persistence.existingClaim=PVC_REDMINE,mariadb.persistenc | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | | `global.imageRegistry` | Global Docker image registry | `""` | | `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | | `global.compatibility.openshift.adaptSecurityContext` | Adapt the 
securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | ### Common parameters @@ -572,6 +573,10 @@ Find more information about how to deal with common errors related to Bitnami's ## Upgrading +### To 29.0.0 + +This major release bumps the MariaDB version to 11.4. Follow the [upstream instructions](https://mariadb.com/kb/en/upgrading-from-mariadb-11-3-to-mariadb-11-4/) for upgrading from MariaDB 11.3 to 11.4. No major issues are expected during the upgrade. + ### To 28.0.0 This major bump changes the following security defaults: diff --git a/argocd-helm-charts/redmine/charts/redmine/charts/common/Chart.yaml b/argocd-helm-charts/redmine/charts/redmine/charts/common/Chart.yaml index 23ba4e4e7..dabd80681 100644 --- a/argocd-helm-charts/redmine/charts/redmine/charts/common/Chart.yaml +++ b/argocd-helm-charts/redmine/charts/redmine/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.20.3 +appVersion: 2.20.5 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts/tree/main/bitnami/common type: library -version: 2.20.3 +version: 2.20.5 diff --git a/argocd-helm-charts/redmine/charts/redmine/charts/common/README.md b/argocd-helm-charts/redmine/charts/redmine/charts/common/README.md index 82d78a384..fee26c991 100644 --- a/argocd-helm-charts/redmine/charts/redmine/charts/common/README.md +++ b/argocd-helm-charts/redmine/charts/redmine/charts/common/README.md @@ -24,7 +24,7 @@ data: myvalue: "Hello World" ``` -Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. +Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. ## Introduction diff --git a/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_resources.tpl b/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_resources.tpl index b4491f782..d8a43e1c2 100644 --- a/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_resources.tpl +++ b/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_resources.tpl @@ -15,31 +15,31 @@ These presets are for basic testing and not meant to be used in production {{- $presets := dict "nano" (dict "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi") ) "micro" (dict "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi") ) "small" (dict "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") - "limits" (dict 
"cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi") ) "medium" (dict "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi") ) "large" (dict "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi") ) "xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi") ) "2xlarge" (dict "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi") - "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi") + "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi") ) }} {{- if hasKey $presets .type -}} diff --git a/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_storage.tpl b/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_storage.tpl index 7780da18b..aa75856c0 100644 --- a/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_storage.tpl +++ b/argocd-helm-charts/redmine/charts/redmine/charts/common/templates/_storage.tpl @@ -4,19 +4,18 @@ SPDX-License-Identifier: APACHE-2.0 */}} {{/* vim: set filetype=mustache: */}} + {{/* Return the proper Storage Class {{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} */}} {{- define "common.storage.class" -}} - -{{- $storageClass := default .persistence.storageClass ((.global).storageClass) -}} +{{- $storageClass := (.global).storageClass | default .persistence.storageClass | default 
(.global).defaultStorageClass | default "" -}} {{- if $storageClass -}} {{- if (eq "-" $storageClass) -}} {{- printf "storageClassName: \"\"" -}} - {{- else }} + {{- else -}} {{- printf "storageClassName: %s" $storageClass -}} {{- end -}} {{- end -}} - {{- end -}} diff --git a/argocd-helm-charts/redmine/charts/redmine/values.yaml b/argocd-helm-charts/redmine/charts/redmine/values.yaml index 0fd652b7c..f036893dc 100644 --- a/argocd-helm-charts/redmine/charts/redmine/values.yaml +++ b/argocd-helm-charts/redmine/charts/redmine/values.yaml @@ -9,7 +9,8 @@ ## @param global.imageRegistry Global Docker image registry ## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead ## global: imageRegistry: "" @@ -18,6 +19,7 @@ global: ## - myRegistryKeySecretName ## imagePullSecrets: [] + defaultStorageClass: "" storageClass: "" ## Compatibility adaptations for Kubernetes platforms ## @@ -83,7 +85,7 @@ diagnosticMode: image: registry: docker.io repository: bitnami/redmine - tag: 5.1.3-debian-12-r1 + tag: 5.1.3-debian-12-r5 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -994,7 +996,7 @@ certificates: image: registry: docker.io repository: bitnami/os-shell - tag: 12-debian-12-r24 + tag: 12-debian-12-r26 digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' From d36772d7d733bc94720fdc8d90987f0a7357f3ba Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:50:56 +0530 Subject: [PATCH 19/25] [CI] Helm Chart Update reloader --- CHANGELOG.md | 1 + argocd-helm-charts/reloader/Chart.yaml | 2 +- argocd-helm-charts/reloader/charts/reloader/Chart.yaml | 4 ++-- 
argocd-helm-charts/reloader/charts/reloader/values.yaml | 4 ++-- argocd-helm-charts/reloader/requirements.lock | 6 +++--- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec4fac196..d08b2ff1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,3 +22,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: opensearch-dashboards from version 2.19.0 to 2.19.1 - Updated: rabbitmq-cluster-operator from version 4.3.13 to 4.3.16 - Updated: redmine from version 28.2.7 to 29.0.3 +- Updated: reloader from version 1.0.116 to 1.0.119 diff --git a/argocd-helm-charts/reloader/Chart.yaml b/argocd-helm-charts/reloader/Chart.yaml index f963dd498..3b688a96e 100644 --- a/argocd-helm-charts/reloader/Chart.yaml +++ b/argocd-helm-charts/reloader/Chart.yaml @@ -3,5 +3,5 @@ name: reloader version: v0.0.124 dependencies: - name: reloader - version: 1.0.116 + version: 1.0.119 repository: https://stakater.github.io/stakater-charts diff --git a/argocd-helm-charts/reloader/charts/reloader/Chart.yaml b/argocd-helm-charts/reloader/charts/reloader/Chart.yaml index 8dfdbe669..50bb32934 100644 --- a/argocd-helm-charts/reloader/charts/reloader/Chart.yaml +++ b/argocd-helm-charts/reloader/charts/reloader/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.0.116 +appVersion: v1.0.119 description: Reloader chart that runs on kubernetes home: https://github.com/stakater/Reloader icon: https://raw.githubusercontent.com/stakater/Reloader/master/assets/web/reloader-round-100px.png @@ -16,4 +16,4 @@ maintainers: name: reloader sources: - https://github.com/stakater/Reloader -version: 1.0.116 +version: 1.0.119 diff --git a/argocd-helm-charts/reloader/charts/reloader/values.yaml b/argocd-helm-charts/reloader/charts/reloader/values.yaml index 20f9c6687..bb0f55e54 100644 --- a/argocd-helm-charts/reloader/charts/reloader/values.yaml +++ b/argocd-helm-charts/reloader/charts/reloader/values.yaml @@ -92,10 +92,10 @@ 
reloader: labels: provider: stakater group: com.stakater.platform - version: v1.0.116 + version: v1.0.119 image: name: ghcr.io/stakater/reloader - tag: v1.0.116 + tag: v1.0.119 pullPolicy: IfNotPresent # Support for extra environment variables. env: diff --git a/argocd-helm-charts/reloader/requirements.lock b/argocd-helm-charts/reloader/requirements.lock index b61ac17da..8b12666ce 100644 --- a/argocd-helm-charts/reloader/requirements.lock +++ b/argocd-helm-charts/reloader/requirements.lock @@ -1,6 +1,6 @@ dependencies: - name: reloader repository: https://stakater.github.io/stakater-charts - version: 1.0.116 -digest: sha256:a43bc254044995c77bebac883bd9c89d65fb53097f8746d94cc264ce42846fab -generated: "2024-07-09T02:57:17.987048233+05:30" + version: 1.0.119 +digest: sha256:a2262586c3fd1df27e46c324a2c83f4dc9dcb2e28283cebcbc37cbe860de1844 +generated: "2024-07-31T20:50:39.912013404+05:30" From 7e2f50a7eb269324ef29d6da6e4eb6363b8b58f8 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:52:29 +0530 Subject: [PATCH 20/25] [CI] Helm Chart Update rook-ceph --- CHANGELOG.md | 2 + argocd-helm-charts/rook-ceph/Chart.lock | 8 +- argocd-helm-charts/rook-ceph/Chart.yaml | 4 +- .../charts/rook-ceph-cluster/Chart.yaml | 4 +- .../prometheus/localrules.yaml | 288 +++++++++++++++++- .../templates/cephblockpool.yaml | 7 + .../templates/cephecblockpool.yaml | 12 +- .../templates/cephfilesystem.yaml | 7 + .../templates/cephobjectstore.yaml | 8 + .../charts/rook-ceph-cluster/values.yaml | 36 ++- .../rook-ceph/charts/rook-ceph/Chart.yaml | 4 +- .../charts/rook-ceph/templates/configmap.yaml | 6 + .../rook-ceph/charts/rook-ceph/values.yaml | 10 +- 13 files changed, 357 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d08b2ff1a..e06953338 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,3 +23,5 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: rabbitmq-cluster-operator from version 4.3.13 to 4.3.16 - 
Updated: redmine from version 28.2.7 to 29.0.3 - Updated: reloader from version 1.0.116 to 1.0.119 +- Updated: rook-ceph from version v1.14.8 to v1.14.9 +- Updated: rook-ceph-cluster from version v1.14.4 to v1.14.9 diff --git a/argocd-helm-charts/rook-ceph/Chart.lock b/argocd-helm-charts/rook-ceph/Chart.lock index 70d46fa36..7902cadcf 100644 --- a/argocd-helm-charts/rook-ceph/Chart.lock +++ b/argocd-helm-charts/rook-ceph/Chart.lock @@ -1,9 +1,9 @@ dependencies: - name: rook-ceph repository: https://charts.rook.io/release - version: v1.14.8 + version: v1.14.9 - name: rook-ceph-cluster repository: https://charts.rook.io/release - version: v1.14.4 -digest: sha256:0427e30458d780b7ec12c40cf6b09973fd90ec3072807ba7e5bc17a3189ec89f -generated: "2024-07-09T02:58:02.063763+05:30" + version: v1.14.9 +digest: sha256:57f96290bffaf829a9689aa5cfd581037e5708c1b189e66f651e27a03d4336ff +generated: "2024-07-31T20:52:05.604676141+05:30" diff --git a/argocd-helm-charts/rook-ceph/Chart.yaml b/argocd-helm-charts/rook-ceph/Chart.yaml index da4b59c54..116e6e749 100644 --- a/argocd-helm-charts/rook-ceph/Chart.yaml +++ b/argocd-helm-charts/rook-ceph/Chart.yaml @@ -3,10 +3,10 @@ name: rook-ceph version: 1.0.0 dependencies: - name: rook-ceph - version: v1.14.8 + version: v1.14.9 repository: https://charts.rook.io/release #repository: "oci://ghcr.io/Obmondo" - name: rook-ceph-cluster - version: v1.14.4 + version: v1.14.9 repository: https://charts.rook.io/release #repository: "oci://ghcr.io/Obmondo" diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/Chart.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/Chart.yaml index 07364aa22..08acdf845 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/Chart.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v1.14.4 +appVersion: v1.14.9 dependencies: - name: library repository: file://../library @@ -9,4 +9,4 @@ icon: 
https://rook.io/images/rook-logo.svg name: rook-ceph-cluster sources: - https://github.com/rook/rook -version: v1.14.4 +version: v1.14.9 diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/prometheus/localrules.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/prometheus/localrules.yaml index e91a923dc..77b00e2b6 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/prometheus/localrules.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/prometheus/localrules.yaml @@ -1,4 +1,8 @@ -# copied from https://github.com/ceph/ceph/blob/master/monitoring/ceph-mixin/prometheus_alerts.yml +# Copied from https://github.com/ceph/ceph/blob/master/monitoring/ceph-mixin/prometheus_alerts.yml +# Attention: This is not a 1:1 copy of ceph-mixin alerts. This file contains several Rook-related adjustments. +# List of main adjustments: +# - Alerts related to cephadm are excluded +# - The PrometheusJobMissing alert is adjusted for the rook-ceph-mgr job, and the PrometheusJobExporterMissing alert is added groups: - name: "cluster health" rules: @@ -198,7 +202,7 @@ groups: type: "ceph_default" - alert: "CephDeviceFailurePredictionTooHigh" annotations: - description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availabililty. Prevent data integrity issues by adding new OSDs so that data may be relocated." + description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated." 
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany" summary: "Too many devices are predicted to fail, unable to resolve" expr: "ceph_health_detail{name=\"DEVICE_HEALTH_TOOMANY\"} == 1" @@ -395,7 +399,7 @@ groups: oid: "1.3.6.1.4.1.50495.1.2.1.7.5" severity: "critical" type: "ceph_default" - - alert: "CephPGUnavilableBlockingIO" + - alert: "CephPGUnavailableBlockingIO" annotations: description: "Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O." documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability" @@ -499,8 +503,8 @@ groups: type: "ceph_default" - alert: "CephNodeNetworkBondDegraded" annotations: - summary: "Degraded Bond on Node {{ $labels.instance }}" description: "Bond {{ $labels.master }} is degraded on Node {{ $labels.instance }}." + summary: "Degraded Bond on Node {{ $labels.instance }}" expr: | node_bonding_slaves - node_bonding_active != 0 labels: @@ -525,6 +529,15 @@ groups: type: "ceph_default" - name: "pools" rules: + - alert: "CephPoolGrowthWarning" + annotations: + description: "Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours." + summary: "Pool growth rate may soon exceed capacity" + expr: "(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id, instance, pod) group_right() ceph_pool_metadata) >= 95" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.9.2" + severity: "warning" + type: "ceph_default" - alert: "CephPoolBackfillFull" annotations: description: "A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity." 
@@ -566,15 +579,99 @@ groups: severity: "warning" type: "ceph_default" - alert: "CephDaemonSlowOps" - for: "30s" - expr: "ceph_daemon_health_metrics{type=\"SLOW_OPS\"} > 0" - labels: - severity: 'warning' - type: 'ceph_default' annotations: - summary: "{{ $labels.ceph_daemon }} operations are slow to complete" description: "{{ $labels.ceph_daemon }} operations are taking too long to process (complaint time exceeded)" documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops" + summary: "{{ $labels.ceph_daemon }} operations are slow to complete" + expr: "ceph_daemon_health_metrics{type=\"SLOW_OPS\"} > 0" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - name: "hardware" + rules: + - alert: "HardwareStorageError" + annotations: + description: "Some storage devices are in error. Check `ceph health detail`." + summary: "Storage devices error(s) detected" + expr: "ceph_health_detail{name=\"HARDWARE_STORAGE\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.13.1" + severity: "critical" + type: "ceph_default" + - alert: "HardwareMemoryError" + annotations: + description: "DIMM error(s) detected. Check `ceph health detail`." + summary: "DIMM error(s) detected" + expr: "ceph_health_detail{name=\"HARDWARE_MEMORY\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.13.2" + severity: "critical" + type: "ceph_default" + - alert: "HardwareProcessorError" + annotations: + description: "Processor error(s) detected. Check `ceph health detail`." + summary: "Processor error(s) detected" + expr: "ceph_health_detail{name=\"HARDWARE_PROCESSOR\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.13.3" + severity: "critical" + type: "ceph_default" + - alert: "HardwareNetworkError" + annotations: + description: "Network error(s) detected. Check `ceph health detail`." 
+ summary: "Network error(s) detected" + expr: "ceph_health_detail{name=\"HARDWARE_NETWORK\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.13.4" + severity: "critical" + type: "ceph_default" + - alert: "HardwarePowerError" + annotations: + description: "Power supply error(s) detected. Check `ceph health detail`." + summary: "Power supply error(s) detected" + expr: "ceph_health_detail{name=\"HARDWARE_POWER\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.13.5" + severity: "critical" + type: "ceph_default" + - alert: "HardwareFanError" + annotations: + description: "Fan error(s) detected. Check `ceph health detail`." + summary: "Fan error(s) detected" + expr: "ceph_health_detail{name=\"HARDWARE_FANS\"} > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.13.6" + severity: "critical" + type: "ceph_default" + - name: "PrometheusServer" + rules: + - alert: "PrometheusJobMissing" + annotations: + description: "The prometheus job that scrapes from Ceph MGR is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance." + summary: "The scrape job for Ceph MGR is missing from Prometheus" + expr: "absent(up{job=\"rook-ceph-mgr\"})" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.12.1" + severity: "critical" + type: "ceph_default" + - alert: "PrometheusJobExporterMissing" + annotations: + description: "The prometheus job that scrapes from Ceph Exporter is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance." 
+ summary: "The scrape job for Ceph Exporter is missing from Prometheus" + expr: "sum(absent(up{job=\"rook-ceph-exporter\"})) and sum(ceph_osd_metadata{ceph_version=~\"^ceph version (1[89]|[2-9][0-9]).*\"}) > 0" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.12.1" + severity: "critical" + type: "ceph_default" - name: "rados" rules: - alert: "CephObjectMissing" @@ -601,3 +698,174 @@ groups: oid: "1.3.6.1.4.1.50495.1.2.1.1.2" severity: "critical" type: "ceph_default" + - name: "rbdmirror" + rules: + - alert: "CephRBDMirrorImagesPerDaemonHigh" + annotations: + description: "Number of image replications per daemon is not supposed to go beyond threshold 100" + summary: "Number of image replications are now above 100" + expr: "sum by (ceph_daemon, namespace) (ceph_rbd_mirror_snapshot_image_snapshots) > 100" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.10.2" + severity: "critical" + type: "ceph_default" + - alert: "CephRBDMirrorImagesNotInSync" + annotations: + description: "Both local and remote RBD mirror images should be in sync." + summary: "Some of the RBD mirror images are not in sync with the remote counter parts." + expr: "sum by (ceph_daemon, image, namespace, pool) (topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_local_timestamp) - topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_remote_timestamp)) != 0" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.10.3" + severity: "critical" + type: "ceph_default" + - alert: "CephRBDMirrorImagesNotInSyncVeryHigh" + annotations: + description: "More than 10% of the images have synchronization problems" + summary: "Number of unsynchronized images are very high." 
+ expr: "count by (ceph_daemon) ((topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_local_timestamp) - topk by (ceph_daemon, image, namespace, pool) (1, ceph_rbd_mirror_snapshot_image_remote_timestamp)) != 0) > (sum by (ceph_daemon) (ceph_rbd_mirror_snapshot_snapshots)*.1)" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.10.4" + severity: "critical" + type: "ceph_default" + - alert: "CephRBDMirrorImageTransferBandwidthHigh" + annotations: + description: "Detected a heavy increase in bandwidth for rbd replications (over 80%) in the last 30 min. This might not be a problem, but it is good to review the number of images being replicated simultaneously" + summary: "The replication network usage has been increased over 80% in the last 30 minutes. Review the number of images being replicated. This alert will be cleaned automatically after 30 minutes" + expr: "rate(ceph_rbd_mirror_journal_replay_bytes[30m]) > 0.80" + for: "1m" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.10.5" + severity: "warning" + type: "ceph_default" + - name: "nvmeof" + rules: + - alert: "NVMeoFSubsystemNamespaceLimit" + annotations: + description: "Subsystems have a max namespace limit defined at creation time. 
This alert means that no more namespaces can be added to {{ $labels.nqn }}" + summary: "{{ $labels.nqn }} subsystem has reached its maximum number of namespaces " + expr: "(count by(nqn) (ceph_nvmeof_subsystem_namespace_metadata)) >= ceph_nvmeof_subsystem_namespace_limit" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFTooManyGateways" + annotations: + description: "You may create many gateways, but 4 is the tested limit" + summary: "Max supported gateways exceeded " + expr: "count(ceph_nvmeof_gateway_info) > 4.00" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFMaxGatewayGroupSize" + annotations: + description: "You may create many gateways in a gateway group, but 2 is the tested limit" + summary: "Max gateways within a gateway group ({{ $labels.group }}) exceeded " + expr: "count by(group) (ceph_nvmeof_gateway_info) > 2.00" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFSingleGatewayGroup" + annotations: + description: "Although a single member gateway group is valid, it should only be used for test purposes" + summary: "The gateway group {{ $labels.group }} consists of a single gateway - HA is not possible " + expr: "count by(group) (ceph_nvmeof_gateway_info) == 1" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFHighGatewayCPU" + annotations: + description: "Typically, high CPU may indicate degraded performance. 
Consider increasing the number of reactor cores" + summary: "CPU used by {{ $labels.instance }} NVMe-oF Gateway is high " + expr: "label_replace(avg by(instance) (rate(ceph_nvmeof_reactor_seconds_total{mode=\"busy\"}[1m])),\"instance\",\"$1\",\"instance\",\"(.*):.*\") > 80.00" + for: "10m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFGatewayOpenSecurity" + annotations: + description: "It is good practice to ensure subsystems use host security to reduce the risk of unexpected data loss" + summary: "Subsystem {{ $labels.nqn }} has been defined without host level security " + expr: "ceph_nvmeof_subsystem_metadata{allow_any_host=\"yes\"}" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFTooManySubsystems" + annotations: + description: "Although you may continue to create subsystems in {{ $labels.gateway_host }}, the configuration may not be supported" + summary: "The number of subsystems defined to the gateway exceeds supported values " + expr: "count by(gateway_host) (label_replace(ceph_nvmeof_subsystem_metadata,\"gateway_host\",\"$1\",\"instance\",\"(.*):.*\")) > 16.00" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFVersionMismatch" + annotations: + description: "This may indicate an issue with deployment. 
Check cephadm logs" + summary: "The cluster has different NVMe-oF gateway releases active " + expr: "count(count by(version) (ceph_nvmeof_gateway_info)) > 1" + for: "1h" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFHighClientCount" + annotations: + description: "The supported limit for clients connecting to a subsystem is 32" + summary: "The number of clients connected to {{ $labels.nqn }} is too high " + expr: "ceph_nvmeof_subsystem_host_count > 32.00" + for: "1m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFHighHostCPU" + annotations: + description: "High CPU on a gateway host can lead to CPU contention and performance degradation" + summary: "The CPU is high ({{ $value }}%) on NVMeoF Gateway host ({{ $labels.host }}) " + expr: "100-((100*(avg by(host) (label_replace(rate(node_cpu_seconds_total{mode=\"idle\"}[5m]),\"host\",\"$1\",\"instance\",\"(.*):.*\")) * on(host) group_right label_replace(ceph_nvmeof_gateway_info,\"host\",\"$1\",\"instance\",\"(.*):.*\")))) >= 80.00" + for: "10m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFInterfaceDown" + annotations: + description: "A NIC used by one or more subsystems is in a down state" + summary: "Network interface {{ $labels.device }} is down " + expr: "ceph_nvmeof_subsystem_listener_iface_info{operstate=\"down\"}" + for: "30s" + labels: + oid: "1.3.6.1.4.1.50495.1.2.1.14.1" + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFInterfaceDuplex" + annotations: + description: "Until this is resolved, performance from the gateway will be degraded" + summary: "Network interface {{ $labels.device }} is not running in full duplex mode " + expr: "ceph_nvmeof_subsystem_listener_iface_info{duplex!=\"full\"}" + for: "30s" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFHighReadLatency" + annotations: + description: "High latencies may indicate a constraint within the cluster e.g. CPU, network. 
Please investigate" + summary: "The average read latency over the last 5 mins has reached 10 ms or more on {{ $labels.gateway }}" + expr: "label_replace((avg by(instance) ((rate(ceph_nvmeof_bdev_read_seconds_total[1m]) / rate(ceph_nvmeof_bdev_reads_completed_total[1m])))),\"gateway\",\"$1\",\"instance\",\"(.*):.*\") > 0.01" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" + - alert: "NVMeoFHighWriteLatency" + annotations: + description: "High latencies may indicate a constraint within the cluster e.g. CPU, network. Please investigate" + summary: "The average write latency over the last 5 mins has reached 20 ms or more on {{ $labels.gateway }}" + expr: "label_replace((avg by(instance) ((rate(ceph_nvmeof_bdev_write_seconds_total[5m]) / rate(ceph_nvmeof_bdev_writes_completed_total[5m])))),\"gateway\",\"$1\",\"instance\",\"(.*):.*\") > 0.02" + for: "5m" + labels: + severity: "warning" + type: "ceph_default" diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephblockpool.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephblockpool.yaml index 23008111d..56257068d 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephblockpool.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephblockpool.yaml @@ -14,8 +14,15 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: {{ $blockpool.storageClass.name }} +{{- if $blockpool.storageClass.labels }} + labels: +{{ toYaml $blockpool.storageClass.labels | indent 4 }} +{{- end }} annotations: storageclass.kubernetes.io/is-default-class: "{{ if default false $blockpool.storageClass.isDefault }}true{{ else }}false{{ end }}" +{{- if $blockpool.storageClass.annotations }} +{{ toYaml $blockpool.storageClass.annotations | indent 4 }} +{{- end }} {{- if $root.Values.csiDriverNamePrefix }} provisioner: {{ $root.Values.csiDriverNamePrefix }}.rbd.csi.ceph.com {{- else }} diff --git 
a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephecblockpool.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephecblockpool.yaml index e52c01fce..8a29c66ba 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephecblockpool.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephecblockpool.yaml @@ -22,10 +22,16 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: {{ $ecblockpool.storageClass.name }} - {{- if $ecblockpool.storageClass.isDefault }} +{{- if $ecblockpool.storageClass.labels }} + labels: +{{ toYaml $ecblockpool.storageClass.labels | indent 4 }} +{{- end }} annotations: - storageclass.kubernetes.io/is-default-class: "true" - {{end}} + storageclass.kubernetes.io/is-default-class: "{{ if default false $ecblockpool.storageClass.isDefault }}true{{ else }}false{{ end }}" +{{- if $ecblockpool.storageClass.annotations }} +{{ toYaml $ecblockpool.storageClass.annotations | indent 4 }} +{{- end }} + {{- if $root.Values.csiDriverNamePrefix }} provisioner: {{ $root.Values.csiDriverNamePrefix }}.rbd.csi.ceph.com {{- else }} diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephfilesystem.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephfilesystem.yaml index 35c6e7200..bef7ac700 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephfilesystem.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephfilesystem.yaml @@ -33,8 +33,15 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: {{ $filesystem.storageClass.name }} +{{- if $filesystem.storageClass.labels }} + labels: +{{ toYaml $filesystem.storageClass.labels | indent 4 }} +{{- end }} annotations: storageclass.kubernetes.io/is-default-class: "{{ if default false $filesystem.storageClass.isDefault }}true{{ else }}false{{ end }}" +{{- if $filesystem.storageClass.annotations }} +{{ toYaml 
$filesystem.storageClass.annotations | indent 4 }} +{{- end }} {{- if $root.Values.csiDriverNamePrefix }} provisioner: {{ $root.Values.csiDriverNamePrefix }}.cephfs.csi.ceph.com {{- else }} diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephobjectstore.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephobjectstore.yaml index a3b00b20c..ad11f636f 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephobjectstore.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/templates/cephobjectstore.yaml @@ -14,6 +14,14 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: {{ $objectstore.storageClass.name }} +{{- if $objectstore.storageClass.labels }} + labels: +{{ toYaml $objectstore.storageClass.labels | indent 4 }} +{{- end }} +{{- if $objectstore.storageClass.annotations }} + annotations: +{{ toYaml $objectstore.storageClass.annotations | indent 4 }} +{{- end }} provisioner: {{ $root.Release.Namespace }}.ceph.rook.io/bucket reclaimPolicy: {{ default "Delete" $objectstore.storageClass.reclaimPolicy }} volumeBindingMode: {{ default "Immediate" $objectstore.storageClass.volumeBindingMode }} diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/values.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/values.yaml index 0ca0f29c2..2210622e5 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/values.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/values.yaml @@ -25,7 +25,7 @@ toolbox: # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md) enabled: false # -- Toolbox image, defaults to the image used by the Ceph cluster - image: #quay.io/ceph/ceph:v18.2.2 + image: #quay.io/ceph/ceph:v18.2.4 # -- Toolbox tolerations tolerations: [] # -- Toolbox affinity @@ -92,9 +92,9 @@ cephClusterSpec: # v17 is Quincy, v18 is Reef. 
# RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. - # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.2-20240311 + # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724 # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities - image: quay.io/ceph/ceph:v18.2.2 + image: quay.io/ceph/ceph:v18.2.4 # Whether to allow unsupported versions of Ceph. Currently `quincy`, and `reef` are supported. # Future versions such as `squid` (v19) would require this to be set to `true`. # Do not set to true in production. @@ -157,7 +157,7 @@ cephClusterSpec: # the corresponding "backend protocol" annotation(s) for your ingress controller of choice) ssl: true - # Network configuration, see: https://github.com/rook/rook/blob/v1.14.4/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings + # Network configuration, see: https://github.com/rook/rook/blob/v1.14.9/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings network: connections: # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network. @@ -208,7 +208,7 @@ cephClusterSpec: periodicity: daily # one of: hourly, daily, weekly, monthly maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M. - # automate [data cleanup process](https://github.com/rook/rook/blob/v1.14.4/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + # automate [data cleanup process](https://github.com/rook/rook/blob/v1.14.9/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. 
cleanupPolicy: # Since cluster cleanup is destructive to data, confirmation is required. # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". @@ -382,7 +382,7 @@ cephClusterSpec: # The section for configuring management of daemon disruptions during upgrade or fencing. disruptionManagement: # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically - # via the strategy outlined in the [design](https://github.com/rook/rook/blob/v1.14.4/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/v1.14.9/design/ceph/ceph-managed-disruptionbudgets.md). The operator will # block eviction of OSDs by default and unblock them safely when drains are detected. managePodBudgets: true # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the @@ -441,7 +441,7 @@ ingress: # @default -- See [below](#ceph-block-pools) cephBlockPools: - name: ceph-blockpool - # see https://github.com/rook/rook/blob/v1.14.4/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration + # see https://github.com/rook/rook/blob/v1.14.9/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration spec: failureDomain: host replicated: @@ -452,6 +452,8 @@ cephBlockPools: storageClass: enabled: true name: ceph-block + annotations: {} + labels: {} isDefault: true reclaimPolicy: Delete allowVolumeExpansion: true @@ -463,7 +465,7 @@ cephBlockPools: # - key: rook-ceph-role # values: # - storage-node - # see https://github.com/rook/rook/blob/v1.14.4/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration + # see 
https://github.com/rook/rook/blob/v1.14.9/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration parameters: # (optional) mapOptions is a comma-separated list of map options. # For krbd options refer @@ -504,7 +506,7 @@ cephBlockPools: # @default -- See [below](#ceph-file-systems) cephFileSystems: - name: ceph-filesystem - # see https://github.com/rook/rook/blob/v1.14.4/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration + # see https://github.com/rook/rook/blob/v1.14.9/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration spec: metadataPool: replicated: @@ -513,7 +515,7 @@ cephFileSystems: - failureDomain: host replicated: size: 3 - # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/v1.14.4/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools + # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/v1.14.9/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools name: data0 metadataServer: activeCount: 1 @@ -534,8 +536,10 @@ cephFileSystems: reclaimPolicy: Delete allowVolumeExpansion: true volumeBindingMode: "Immediate" + annotations: {} + labels: {} mountOptions: [] - # see https://github.com/rook/rook/blob/v1.14.4/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration + # see https://github.com/rook/rook/blob/v1.14.9/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration parameters: # The secrets contain Ceph admin credentials. 
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner @@ -577,7 +581,7 @@ cephBlockPoolsVolumeSnapshotClass: # @default -- See [below](#ceph-object-stores) cephObjectStores: - name: ceph-objectstore - # see https://github.com/rook/rook/blob/v1.14.4/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration + # see https://github.com/rook/rook/blob/v1.14.9/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration spec: metadataPool: failureDomain: host @@ -606,7 +610,9 @@ cephObjectStores: name: ceph-bucket reclaimPolicy: Delete volumeBindingMode: "Immediate" - # see https://github.com/rook/rook/blob/v1.14.4/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration + annotations: {} + labels: {} + # see https://github.com/rook/rook/blob/v1.14.9/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration parameters: # note: objectStoreNamespace and objectStoreName are configured by the chart region: us-east-1 @@ -669,8 +675,10 @@ cephObjectStores: # storageClass: # provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name # enabled: true -# name: rook-ceph-block +# name: rook-ceph-block # isDefault: false +# annotations: { } +# labels: { } # allowVolumeExpansion: true # reclaimPolicy: Delete diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph/Chart.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph/Chart.yaml index 024ecec04..fcb9a9d24 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph/Chart.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v1.14.8 +appVersion: v1.14.9 dependencies: - name: library repository: file://../library @@ -9,4 +9,4 @@ icon: https://rook.io/images/rook-logo.svg name: rook-ceph sources: - https://github.com/rook/rook 
-version: v1.14.8 +version: v1.14.9 diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph/templates/configmap.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph/templates/configmap.yaml index 2d502b8ce..13c8e96c9 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph/templates/configmap.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph/templates/configmap.yaml @@ -251,3 +251,9 @@ data: CSI_RBD_ATTACH_REQUIRED: {{ .Values.csi.rbdAttachRequired | quote }} CSI_NFS_ATTACH_REQUIRED: {{ .Values.csi.nfsAttachRequired | quote }} {{- end }} +{{- if .Values.csi.kubeApiBurst }} + CSI_KUBE_API_BURST: {{ .Values.csi.kubeApiBurst | quote }} +{{- end }} +{{- if .Values.csi.kubeApiQPS }} + CSI_KUBE_API_QPS: {{ .Values.csi.kubeApiQPS | quote }} +{{- end }} diff --git a/argocd-helm-charts/rook-ceph/charts/rook-ceph/values.yaml b/argocd-helm-charts/rook-ceph/charts/rook-ceph/values.yaml index 8c99316bb..5d3a09e64 100644 --- a/argocd-helm-charts/rook-ceph/charts/rook-ceph/values.yaml +++ b/argocd-helm-charts/rook-ceph/charts/rook-ceph/values.yaml @@ -6,8 +6,8 @@ image: # -- Image repository: rook/ceph # -- Image tag - # @default -- `v1.14.8` - tag: v1.14.8 + # @default -- `v1.14.9` + tag: v1.14.9 # -- Image pull policy pullPolicy: IfNotPresent @@ -186,6 +186,12 @@ csi: # -- Allow starting an unsupported ceph-csi image allowUnsupportedVersion: false + # -- Burst to use while communicating with the kubernetes apiserver. + kubeApiBurst: + + # -- QPS to use while communicating with the kubernetes apiserver. 
+ kubeApiQPS: + # -- The volume of the CephCSI RBD plugin DaemonSet csiRBDPluginVolume: # - name: lib-modules From 4c1d0ff9d4d282f0b0275ffe821b82e863f26c5d Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:53:11 +0530 Subject: [PATCH 21/25] [CI] Helm Chart Update sealed-secrets --- CHANGELOG.md | 1 + argocd-helm-charts/sealed-secrets/Chart.lock | 6 +++--- argocd-helm-charts/sealed-secrets/Chart.yaml | 2 +- .../sealed-secrets/charts/sealed-secrets/Chart.yaml | 4 ++-- .../sealed-secrets/charts/sealed-secrets/README.md | 2 +- .../sealed-secrets/charts/sealed-secrets/values.yaml | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e06953338..125128ce6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,3 +25,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: reloader from version 1.0.116 to 1.0.119 - Updated: rook-ceph from version v1.14.8 to v1.14.9 - Updated: rook-ceph-cluster from version v1.14.4 to v1.14.9 +- Updated: sealed-secrets from version 2.16.0 to 2.16.1 diff --git a/argocd-helm-charts/sealed-secrets/Chart.lock b/argocd-helm-charts/sealed-secrets/Chart.lock index cb28f5c80..dd6e66595 100644 --- a/argocd-helm-charts/sealed-secrets/Chart.lock +++ b/argocd-helm-charts/sealed-secrets/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: sealed-secrets repository: https://bitnami-labs.github.io/sealed-secrets - version: 2.16.0 -digest: sha256:71de5c53f45976b0e2ffbd2774bd71d93afca6d677e00c2855720c847b21d287 -generated: "2024-07-09T02:57:54.464701163+05:30" + version: 2.16.1 +digest: sha256:18087e5d5f654cdca8e78c29dd194f874145da362e62324b1048cbfe91e5c261 +generated: "2024-07-31T20:52:53.62960903+05:30" diff --git a/argocd-helm-charts/sealed-secrets/Chart.yaml b/argocd-helm-charts/sealed-secrets/Chart.yaml index e3f9f1abc..f5ff48ef6 100644 --- a/argocd-helm-charts/sealed-secrets/Chart.yaml +++ b/argocd-helm-charts/sealed-secrets/Chart.yaml @@ -4,6 +4,6 @@ 
version: 0.17.3 # see latest chart (and appversion) here: https://github.com/bitnami-labs/sealed-secrets/tree/main/helm/sealed-secrets dependencies: - name: sealed-secrets - version: 2.16.0 + version: 2.16.1 repository: https://bitnami-labs.github.io/sealed-secrets #repository: "oci://ghcr.io/Obmondo" diff --git a/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/Chart.yaml b/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/Chart.yaml index dac4a0616..40457caf9 100644 --- a/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/Chart.yaml +++ b/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/Chart.yaml @@ -1,7 +1,7 @@ annotations: category: DeveloperTools apiVersion: v2 -appVersion: 0.27.0 +appVersion: 0.27.1 description: Helm chart for the sealed-secrets controller. home: https://github.com/bitnami-labs/sealed-secrets icon: https://bitnami.com/assets/stacks/sealed-secrets/img/sealed-secrets-stack-220x234.png @@ -16,4 +16,4 @@ name: sealed-secrets sources: - https://github.com/bitnami-labs/sealed-secrets type: application -version: 2.16.0 +version: 2.16.1 diff --git a/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/README.md b/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/README.md index 0aa5fd876..84e6b2472 100644 --- a/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/README.md +++ b/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/README.md @@ -86,7 +86,7 @@ The command removes all the Kubernetes components associated with the chart and | ------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ----------------------------------- | | `image.registry` | Sealed Secrets image registry | `docker.io` | | `image.repository` | Sealed Secrets image repository | `bitnami/sealed-secrets-controller` | -| `image.tag` | Sealed Secrets image tag (immutable tags are recommended) | `0.27.0` | +| `image.tag` | Sealed Secrets 
image tag (immutable tags are recommended) | `0.27.1` | | `image.pullPolicy` | Sealed Secrets image pull policy | `IfNotPresent` | | `image.pullSecrets` | Sealed Secrets image pull secrets | `[]` | | `revisionHistoryLimit` | Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) | `""` | diff --git a/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/values.yaml b/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/values.yaml index c428b5a43..5c1c78031 100644 --- a/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/values.yaml +++ b/argocd-helm-charts/sealed-secrets/charts/sealed-secrets/values.yaml @@ -39,7 +39,7 @@ commonLabels: {} image: registry: docker.io repository: bitnami/sealed-secrets-controller - tag: 0.27.0 + tag: 0.27.1 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images From e14a7ed975885737ba17c948ba6dd56313858f7f Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:54:03 +0530 Subject: [PATCH 22/25] [CI] Helm Chart Update sonarqube --- CHANGELOG.md | 1 + argocd-helm-charts/sonarqube/Chart.lock | 6 ++-- argocd-helm-charts/sonarqube/Chart.yaml | 2 +- .../sonarqube/charts/sonarqube/CHANGELOG.md | 5 +++ .../sonarqube/charts/sonarqube/Chart.yaml | 14 +++----- .../charts/sonarqube/templates/_pod.tpl | 34 +++++-------------- 6 files changed, 22 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 125128ce6..4822be83f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,3 +26,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: rook-ceph from version v1.14.8 to v1.14.9 - Updated: rook-ceph-cluster from version v1.14.4 to v1.14.9 - Updated: sealed-secrets from version 2.16.0 to 2.16.1 +- Updated: sonarqube from version 10.6.0+3033 to 10.6.1+3163 diff --git 
a/argocd-helm-charts/sonarqube/Chart.lock b/argocd-helm-charts/sonarqube/Chart.lock index eccd465dd..e1febed76 100644 --- a/argocd-helm-charts/sonarqube/Chart.lock +++ b/argocd-helm-charts/sonarqube/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: sonarqube repository: https://SonarSource.github.io/helm-chart-sonarqube - version: 10.6.0+3033 -digest: sha256:d8c15e239df003f07c3bf2369c04fbb017433d9a46ef6483fbe21fb441e90945 -generated: "2024-07-09T02:58:36.362680967+05:30" + version: 10.6.1+3163 +digest: sha256:e62573b00bcd78fb788969847a37b21cd3c28be57bacf94eb894d503c76cc904 +generated: "2024-07-31T20:53:45.433020496+05:30" diff --git a/argocd-helm-charts/sonarqube/Chart.yaml b/argocd-helm-charts/sonarqube/Chart.yaml index 381f470cd..0ca98898e 100644 --- a/argocd-helm-charts/sonarqube/Chart.yaml +++ b/argocd-helm-charts/sonarqube/Chart.yaml @@ -9,5 +9,5 @@ name: sonarqube version: 4.0.2+325 dependencies: - name: sonarqube - version: 10.6.0+3033 + version: 10.6.1+3163 repository: https://SonarSource.github.io/helm-chart-sonarqube diff --git a/argocd-helm-charts/sonarqube/charts/sonarqube/CHANGELOG.md b/argocd-helm-charts/sonarqube/charts/sonarqube/CHANGELOG.md index d7115d1f7..ff154e87f 100644 --- a/argocd-helm-charts/sonarqube/charts/sonarqube/CHANGELOG.md +++ b/argocd-helm-charts/sonarqube/charts/sonarqube/CHANGELOG.md @@ -1,6 +1,11 @@ # SonarQube Chart Changelog All changes to this chart will be documented in this file. 
+## [10.6.1] +* Update Chart's version to 10.6.1 +* Fix a typo in the new common STS template +* Fix regression on env valuesFrom in the new STS template + ## [10.6.0] * Update SonarQube to 10.6.0 * Update Chart's version to 10.6.0 diff --git a/argocd-helm-charts/sonarqube/charts/sonarqube/Chart.yaml b/argocd-helm-charts/sonarqube/charts/sonarqube/Chart.yaml index b662bb8ab..75f319c1d 100644 --- a/argocd-helm-charts/sonarqube/charts/sonarqube/Chart.yaml +++ b/argocd-helm-charts/sonarqube/charts/sonarqube/Chart.yaml @@ -1,17 +1,11 @@ annotations: artifacthub.io/changes: | - kind: changed - description: "Update SonarQube's version to 10.6.0" - - kind: changed - description: "Update Chart's version to 10.6.0" + description: "Update Chart's version to 10.6.1" - kind: fixed - description: "Fix the env-var templating when sourcing from secrets" + description: "Fix typo in the new STS template" - kind: fixed - description: "Fix the postgresql chart's repository link" - - kind: changed - description: "Add support for overriding liveness/readiness probe logic" - - kind: changed - description: "Use a common template for Deployment and StatefulSet" + description: "Fix regression on env valuesFrom in the new STS template" artifacthub.io/containsSecurityUpdates: "false" artifacthub.io/images: | - name: sonarqube @@ -61,4 +55,4 @@ sources: - https://github.com/SonarSource/docker-sonarqube - https://github.com/SonarSource/sonarqube type: application -version: 10.6.0+3033 +version: 10.6.1+3163 diff --git a/argocd-helm-charts/sonarqube/charts/sonarqube/templates/_pod.tpl b/argocd-helm-charts/sonarqube/charts/sonarqube/templates/_pod.tpl index dd5fe2508..04eb7358f 100644 --- a/argocd-helm-charts/sonarqube/charts/sonarqube/templates/_pod.tpl +++ b/argocd-helm-charts/sonarqube/charts/sonarqube/templates/_pod.tpl @@ -16,7 +16,7 @@ metadata: labels: {{- include "sonarqube.selectorLabels" . | nindent 4 }} {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} + {{- toYaml . 
| nindent 4 }} {{- end }} spec: automountServiceAccountToken: false @@ -69,10 +69,7 @@ spec: - mountPath: /tmp/secrets/ca-certs name: ca-certs env: - {{- range (include "sonarqube.combined_env" . | fromJsonArray) }} - - name: {{ .name }} - value: {{ .value | quote }} - {{- end }} + {{- (include "sonarqube.combined_env" . | fromJsonArray) | toYaml | trim | nindent 8 }} {{- end }} {{- if or .Values.initSysctl.enabled .Values.elasticsearch.configureNode }} - name: init-sysctl @@ -89,10 +86,7 @@ spec: - name: init-sysctl mountPath: /tmp/scripts/ env: - {{- range (include "sonarqube.combined_env" . | fromJsonArray) }} - - name: {{ .name }} - value: {{ .value | quote }} - {{- end }} + {{- (include "sonarqube.combined_env" . | fromJsonArray) | toYaml | trim | nindent 8 }} {{- end }} {{- if or .Values.sonarProperties .Values.sonarSecretProperties .Values.sonarSecretKey (not .Values.elasticsearch.bootstrapChecks) }} - name: concat-properties @@ -132,10 +126,7 @@ spec: resources: {{- toYaml . | nindent 8 }} {{- end }} env: - {{- range (include "sonarqube.combined_env" . | fromJsonArray) }} - - name: {{ .name }} - value: {{ .value | quote }} - {{- end }} + {{- (include "sonarqube.combined_env" . | fromJsonArray) | toYaml | trim | nindent 8 }} {{- end }} {{- if .Values.prometheusExporter.enabled }} - name: inject-prometheus-exporter @@ -160,10 +151,7 @@ spec: value: {{ default "" .Values.prometheusExporter.httpsProxy }} - name: no_proxy value: {{ default "" .Values.prometheusExporter.noProxy }} - {{- range (include "sonarqube.combined_env" . | fromJsonArray) }} - - name: {{ .name }} - value: {{ .value | quote }} - {{- end }} + {{- (include "sonarqube.combined_env" . | fromJsonArray) | toYaml | trim | nindent 8 }} {{- end }} {{- if and .Values.persistence.enabled .Values.initFs.enabled }} - name: init-fs @@ -172,7 +160,7 @@ spec: {{- with (default .Values.initContainers.securityContext .Values.initFs.securityContext) }} securityContext: {{- toYaml . 
| nindent 8 }} {{- end }} - {{- with (default .Values.initContainers.resources .Values.initFs.resources) -}} + {{- with (default .Values.initContainers.resources .Values.initFs.resources) }} resources: {{- toYaml . | nindent 8 }} {{- end }} command: ["sh", "-e", "/tmp/scripts/init_fs.sh"] @@ -236,10 +224,7 @@ spec: value: {{ default "" .Values.plugins.httpsProxy }} - name: no_proxy value: {{ default "" .Values.plugins.noProxy }} - {{- range (include "sonarqube.combined_env" . | fromJsonArray) }} - - name: {{ .name }} - value: {{ .value | quote }} - {{- end }} + {{- (include "sonarqube.combined_env" . | fromJsonArray) | toYaml | trim | nindent 8 }} {{- end }} containers: {{- with .Values.extraContainers }} @@ -279,10 +264,7 @@ spec: name: {{ include "sonarqube.fullname" . }}-monitoring-passcode key: SONAR_WEB_SYSTEMPASSCODE {{- end }} - {{- range (include "sonarqube.combined_env" . | fromJsonArray) }} - - name: {{ .name }} - value: {{ .value | quote }} - {{- end }} + {{- (include "sonarqube.combined_env" . | fromJsonArray) | toYaml | trim | nindent 8 }} envFrom: - configMapRef: name: {{ include "sonarqube.fullname" . 
}}-jdbc-config From 4dcafb8d129bf7cd1ec5714ee27c8e1f4b0e7f47 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:55:34 +0530 Subject: [PATCH 23/25] [CI] Helm Chart Update tigera-operator --- CHANGELOG.md | 1 + argocd-helm-charts/tigera-operator/Chart.lock | 6 +++--- argocd-helm-charts/tigera-operator/Chart.yaml | 2 +- .../tigera-operator/charts/tigera-operator/Chart.yaml | 4 ++-- .../tigera-operator/charts/tigera-operator/values.yaml | 4 ++-- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4822be83f..1fd973587 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,3 +27,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: rook-ceph-cluster from version v1.14.4 to v1.14.9 - Updated: sealed-secrets from version 2.16.0 to 2.16.1 - Updated: sonarqube from version 10.6.0+3033 to 10.6.1+3163 +- Updated: tigera-operator from version v3.28.0 to v3.28.1 diff --git a/argocd-helm-charts/tigera-operator/Chart.lock b/argocd-helm-charts/tigera-operator/Chart.lock index 27df413f6..bd79f44ec 100644 --- a/argocd-helm-charts/tigera-operator/Chart.lock +++ b/argocd-helm-charts/tigera-operator/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: tigera-operator repository: https://projectcalico.docs.tigera.io/charts - version: v3.28.0 -digest: sha256:535c08b8891bf70819a18b6abdb4aebdbdb18d1e1f3d016b8bdb7da2b34e0e58 -generated: "2024-05-17T13:54:32.5759745+02:00" + version: v3.28.1 +digest: sha256:a88f1e566c318271db9781803686933f9906843ae611d01c383bdd33c9f0c637 +generated: "2024-07-31T20:55:16.206874763+05:30" diff --git a/argocd-helm-charts/tigera-operator/Chart.yaml b/argocd-helm-charts/tigera-operator/Chart.yaml index 9e2d7df1e..de1b39e95 100644 --- a/argocd-helm-charts/tigera-operator/Chart.yaml +++ b/argocd-helm-charts/tigera-operator/Chart.yaml @@ -3,7 +3,7 @@ name: tigera-operator version: 1.0.0 dependencies: - name: tigera-operator - version: v3.28.0 + version: v3.28.1 
#repository: https://github.com/projectcalico/calico/tree/master/calico/_includes/charts/tigera-operator #repository: "oci://ghcr.io/Obmondo" repository: https://projectcalico.docs.tigera.io/charts diff --git a/argocd-helm-charts/tigera-operator/charts/tigera-operator/Chart.yaml b/argocd-helm-charts/tigera-operator/charts/tigera-operator/Chart.yaml index 369ca984a..0de808281 100644 --- a/argocd-helm-charts/tigera-operator/charts/tigera-operator/Chart.yaml +++ b/argocd-helm-charts/tigera-operator/charts/tigera-operator/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v3.28.0 +appVersion: v3.28.1 description: Installs the Tigera operator for Calico home: https://projectcalico.docs.tigera.io/about/about-calico icon: https://projectcalico.docs.tigera.io/images/felix_icon.png @@ -8,4 +8,4 @@ sources: - https://github.com/projectcalico/calico/tree/master/calico/_includes/charts/tigera-operator - https://github.com/tigera/operator - https://github.com/projectcalico/calico -version: v3.28.0 +version: v3.28.1 diff --git a/argocd-helm-charts/tigera-operator/charts/tigera-operator/values.yaml b/argocd-helm-charts/tigera-operator/charts/tigera-operator/values.yaml index 8adabe6bb..1d5c6a377 100644 --- a/argocd-helm-charts/tigera-operator/charts/tigera-operator/values.yaml +++ b/argocd-helm-charts/tigera-operator/charts/tigera-operator/values.yaml @@ -61,11 +61,11 @@ podLabels: {} # Image and registry configuration for the tigera/operator pod. 
tigeraOperator: image: tigera/operator - version: v1.34.0 + version: v1.34.3 registry: quay.io calicoctl: image: docker.io/calico/ctl - tag: v3.28.0 + tag: v3.28.1 kubeletVolumePluginPath: /var/lib/kubelet From 22a0787a7cfcc20aeee92dfc85b57c816f2db069 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:56:12 +0530 Subject: [PATCH 24/25] [CI] Helm Chart Update traefik --- CHANGELOG.md | 1 + argocd-helm-charts/traefik/Chart.lock | 6 +- argocd-helm-charts/traefik/Chart.yaml | 2 +- .../traefik/charts/traefik/Changelog.md | 131 ++++++++++++++++++ .../traefik/charts/traefik/Chart.yaml | 19 +-- .../traefik/charts/traefik/EXAMPLES.md | 126 ++++++++++++++++- .../traefik/charts/traefik/README.md | 6 + .../traefik/charts/traefik/VALUES.md | 17 ++- .../charts/traefik/templates/NOTES.txt | 2 +- .../charts/traefik/templates/_helpers.tpl | 14 +- .../charts/traefik/templates/_podtemplate.tpl | 64 +++++---- .../templates/dashboard-ingressroute.yaml | 39 ------ .../charts/traefik/templates/gateway.yaml | 42 ++++-- .../templates/healthcheck-ingressroute.yaml | 39 ------ .../traefik/templates/ingressroute.yaml | 43 ++++++ .../traefik/templates/rbac/clusterrole.yaml | 130 +++++++++-------- .../templates/rbac/clusterrolebinding.yaml | 3 +- .../charts/traefik/templates/rbac/role.yaml | 87 ++---------- .../traefik/templates/requirements.yaml | 6 + .../traefik/charts/traefik/values.yaml | 43 +++++- 20 files changed, 530 insertions(+), 290 deletions(-) delete mode 100644 argocd-helm-charts/traefik/charts/traefik/templates/dashboard-ingressroute.yaml delete mode 100644 argocd-helm-charts/traefik/charts/traefik/templates/healthcheck-ingressroute.yaml create mode 100644 argocd-helm-charts/traefik/charts/traefik/templates/ingressroute.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 1fd973587..86b712ba6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,3 +28,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: sealed-secrets 
from version 2.16.0 to 2.16.1 - Updated: sonarqube from version 10.6.0+3033 to 10.6.1+3163 - Updated: tigera-operator from version v3.28.0 to v3.28.1 +- Updated: traefik from version 29.0.0 to 30.0.2 diff --git a/argocd-helm-charts/traefik/Chart.lock b/argocd-helm-charts/traefik/Chart.lock index 290bb832a..9b13b1f21 100644 --- a/argocd-helm-charts/traefik/Chart.lock +++ b/argocd-helm-charts/traefik/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: traefik repository: https://helm.traefik.io/traefik - version: 29.0.0 -digest: sha256:71f4c0d5a1f6c7223bb2931ce03bd2f6d26fb678bcc89680f2ed1d4a69267bbc -generated: "2024-07-09T02:58:58.006500539+05:30" + version: 30.0.2 +digest: sha256:c6ee5906a8910cf2302bc22fe5cf663e87f05be2d97ad766914ebbe53ef51185 +generated: "2024-07-31T20:55:55.433666711+05:30" diff --git a/argocd-helm-charts/traefik/Chart.yaml b/argocd-helm-charts/traefik/Chart.yaml index ef7b3fa1a..f5b02e38f 100644 --- a/argocd-helm-charts/traefik/Chart.yaml +++ b/argocd-helm-charts/traefik/Chart.yaml @@ -3,6 +3,6 @@ name: traefik version: 1.0.0 dependencies: - name: traefik - version: 29.0.0 + version: 30.0.2 repository: https://helm.traefik.io/traefik #repository: "oci://ghcr.io/Obmondo" diff --git a/argocd-helm-charts/traefik/charts/traefik/Changelog.md b/argocd-helm-charts/traefik/charts/traefik/Changelog.md index 065be9f12..4adf40076 100644 --- a/argocd-helm-charts/traefik/charts/traefik/Changelog.md +++ b/argocd-helm-charts/traefik/charts/traefik/Changelog.md @@ -1,5 +1,136 @@ # Change Log +## 30.0.2 ![AppVersion: v3.1.0](https://img.shields.io/static/v1?label=AppVersion&message=v3.1.0&color=success&logo=) ![Kubernetes: >=1.22.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.22.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm) + +**Release date:** 2024-07-30 + +* fix(Traefik Hub): missing RBACs for Traefik Hub +* chore(release): 🚀 publish v30.0.2 + +## 
30.0.1 ![AppVersion: v3.1.0](https://img.shields.io/static/v1?label=AppVersion&message=v3.1.0&color=success&logo=) ![Kubernetes: >=1.22.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.22.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm) + +**Release date:** 2024-07-29 + +* fix(Traefik Hub): support new RBACs for upcoming traefik hub release +* fix(Traefik Hub): RBACs missing with API Gateway +* feat: :release: v30.0.1 + +## 30.0.0 ![AppVersion: v3.1.0](https://img.shields.io/static/v1?label=AppVersion&message=v3.1.0&color=success&logo=) ![Kubernetes: >=1.22.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.22.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm) + +**Release date:** 2024-07-24 + +* fix: 🐛 ingressroute default name +* fix: namespaced RBACs hub api gateway +* fix: can't set gateway name +* fix(Gateway API): provide expected roles when using namespaced RBAC +* fix(Gateway API)!: revamp Gateway implementation +* feat: ✨ display release name and image full path in installation notes +* feat: use single ingressRoute template +* feat: handle log filePath and noColor +* chore(release): 🚀 publish v30.0.0 +* chore(deps): update traefik docker tag to v3.1.0 + +**Upgrade Notes** + +There is a breaking upgrade on how to configure Gateway with _values_. +This release supports Traefik Proxy v3.0 **and** v3.1. 
+ +### Default value changes + +```diff +diff --git a/traefik/values.yaml b/traefik/values.yaml +index c8bfd5b..83b6d98 100644 +--- a/traefik/values.yaml ++++ b/traefik/values.yaml +@@ -134,14 +134,36 @@ gateway: + enabled: true + # -- Set a custom name to gateway + name: +- # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.FromNamespaces) +- namespacePolicy: +- # -- See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig) +- certificateRefs: + # -- By default, Gateway is created in the same `Namespace` than Traefik. + namespace: + # -- Additional gateway annotations (e.g. for cert-manager.io/issuer) + annotations: ++ # -- Define listeners ++ listeners: ++ web: ++ # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. ++ # The port must match a port declared in ports section. ++ port: 8000 ++ # -- Optional hostname. See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) ++ hostname: ++ # Specify expected protocol on this listener. See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType) ++ protocol: HTTP ++ # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces ++ namespacePolicy: ++ websecure: ++ # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. ++ # The port must match a port declared in ports section. ++ port: 8443 ++ # -- Optional hostname. 
See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) ++ hostname: ++ # Specify expected protocol on this listener See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType) ++ protocol: HTTPS ++ # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces) ++ namespacePolicy: ++ # -- Add certificates for TLS or HTTPS protocols. See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig) ++ certificateRefs: ++ # -- TLS behavior for the TLS session initiated by the client. See [TLSModeType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.TLSModeType). ++ mode: + + gatewayClass: + # -- When providers.kubernetesGateway.enabled and gateway.enabled, deploy a default gatewayClass +@@ -161,6 +183,10 @@ ingressRoute: + labels: {} + # -- The router match rule used for the dashboard ingressRoute + matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`) ++ # -- The internal service used for the dashboard ingressRoute ++ services: ++ - name: api@internal ++ kind: TraefikService + # -- Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure). + # By default, it's using traefik entrypoint, which is not exposed. + # /!\ Do not expose your dashboard without any protection over the internet /!\ +@@ -178,6 +204,10 @@ ingressRoute: + labels: {} + # -- The router match rule used for the healthcheck ingressRoute + matchRule: PathPrefix(`/ping`) ++ # -- The internal service used for the healthcheck ingressRoute ++ services: ++ - name: ping@internal ++ kind: TraefikService + # -- Specify the allowed entrypoints to use for the healthcheck ingress route, (e.g. traefik, web, websecure). + # By default, it's using traefik entrypoint, which is not exposed. 
+ entryPoints: ["traefik"] +@@ -307,9 +337,12 @@ logs: + # -- Set [logs format](https://doc.traefik.io/traefik/observability/logs/#format) + # @default common + format: +- # By default, the level is set to ERROR. ++ # By default, the level is set to INFO. + # -- Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO. + level: INFO ++ # ++ # filePath: "/var/log/traefik/traefik.log ++ # noColor: true + access: + # -- To enable access logs + enabled: false +``` + + +## 29.0.1 ![AppVersion: v3.0.4](https://img.shields.io/static/v1?label=AppVersion&message=v3.0.4&color=success&logo=) ![Kubernetes: >=1.22.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.22.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm) + +**Release date:** 2024-07-09 + +* fix: semverCompare failing on some legitimate tags +* fix: RBACs for hub and disabled namespaced RBACs +* chore(release): 🚀 publish v29.0.1 +* chore(deps): update jnorwood/helm-docs docker tag to v1.14.0 + ## 29.0.0 ![AppVersion: v3.0.4](https://img.shields.io/static/v1?label=AppVersion&message=v3.0.4&color=success&logo=) ![Kubernetes: >=1.22.0-0](https://img.shields.io/static/v1?label=Kubernetes&message=%3E%3D1.22.0-0&color=informational&logo=kubernetes) ![Helm: v3](https://img.shields.io/static/v1?label=Helm&message=v3&color=informational&logo=helm) **Upgrade Notes** diff --git a/argocd-helm-charts/traefik/charts/traefik/Chart.yaml b/argocd-helm-charts/traefik/charts/traefik/Chart.yaml index f7d7cfaa7..63145f727 100644 --- a/argocd-helm-charts/traefik/charts/traefik/Chart.yaml +++ b/argocd-helm-charts/traefik/charts/traefik/Chart.yaml @@ -1,19 +1,8 @@ annotations: - artifacthub.io/changes: "- \"fix: \U0001F41B improve error message on additional - service without ports\"\n- \"fix: allow multiples values in the `secretResourceNames` - slice\"\n- \"fix(rbac)!: nodes API permissions for Traefik v3.1+\"\n- 
\"fix(dashboard): - Only set ingressClass annotation when kubernetesCRD provider is listening for - it\"\n- \"fix!: prometheus operator settings\"\n- \"feat: ✨ update CRDs & RBAC - for Traefik Proxy\"\n- \"feat: ✨ migrate to endpointslices rbac\"\n- \"feat: allow - to set hostAliases for traefik pod\"\n- \"feat(providers): add nativeLBByDefault - support\"\n- \"feat(providers)!: improve kubernetesGateway and Gateway API support\"\n- - \"feat(dashboard)!: dashboard `IngressRoute` should be disabled by default\"\n- - \"docs: fix typos and broken link\"\n- \"chore: update CRDs to v1.5.0\"\n- \"chore: - update CRDs to v1.4.0\"\n- \"chore(release): publish v29.0.0\"\n- \"chore(deps): - update traefik docker tag to v3.0.4\"\n- \"chore(deps): update traefik docker - tag to v3.0.3\"\n" + artifacthub.io/changes: "- \"fix(Traefik Hub): missing RBACs for Traefik Hub\"\n- + \"chore(release): \U0001F680 publish v30.0.2\"\n" apiVersion: v2 -appVersion: v3.0.4 +appVersion: v3.1.0 description: A Traefik based Kubernetes ingress controller home: https://traefik.io/ icon: https://raw.githubusercontent.com/traefik/traefik/v2.3/docs/content/assets/img/traefik.logo.png @@ -35,4 +24,4 @@ sources: - https://github.com/traefik/traefik - https://github.com/traefik/traefik-helm-chart type: application -version: 29.0.0 +version: 30.0.2 diff --git a/argocd-helm-charts/traefik/charts/traefik/EXAMPLES.md b/argocd-helm-charts/traefik/charts/traefik/EXAMPLES.md index f9ec336a5..ee9816824 100644 --- a/argocd-helm-charts/traefik/charts/traefik/EXAMPLES.md +++ b/argocd-helm-charts/traefik/charts/traefik/EXAMPLES.md @@ -24,7 +24,7 @@ deployment: ``` ## Extending DNS config -In order to configure additional DNS servers for your traefik pod, you can use `dnsConfig` option: +In order to configure additional DNS servers for your traefik pod, you can use `dnsConfig` option: ```yaml deployment: @@ -78,15 +78,29 @@ autoscaling: # Access Traefik dashboard without exposing it -This HelmChart does not expose 
the Traefik dashboard by default, for security concerns. -Thus, there are multiple ways to expose the dashboard. -For instance, the dashboard access could be achieved through a port-forward : +This Chart does not expose the Traefik local dashboard by default. It's explained in upstream [documentation](https://doc.traefik.io/traefik/operations/api/) why: + +> Enabling the API in production is not recommended, because it will expose all configuration elements, including sensitive data. + +It says also: + +> In production, it should be at least secured by authentication and authorizations. + +Thus, there are multiple ways to expose the dashboard. For instance, after enabling the creation of dashboard `IngressRoute` in the values: + +```yaml +ingressRoute: + dashboard: + enabled: true +``` + +The traefik admin port can be forwarded locally: ```bash kubectl port-forward $(kubectl get pods --selector "app.kubernetes.io/name=traefik" --output=name) 9000:9000 ``` -Accessible with the url: http://127.0.0.1:9000/dashboard/ +This command makes the dashboard accessible on the url: http://127.0.0.1:9000/dashboard/ # Publish and protect Traefik Dashboard with basic Auth @@ -876,3 +890,105 @@ spec: Once it's applied, whoami should be accessible on http://whoami.docker.localhost/ + +# Use Kubernetes Gateway API with cert-manager + +One can use the new stable kubernetes gateway API provider with automatic TLS certificates delivery (with cert-manager) setting the following _values_: + +```yaml +providers: + kubernetesGateway: + enabled: true +gateway: + enabled: true + annotations: + cert-manager.io/issuer: selfsigned-issuer + listeners: + websecure: + hostname: whoami.docker.localhost + certificateRefs: + - name: whoami-tls +``` + +Install cert-manager: + +```bash +helm repo add jetstack https://charts.jetstack.io --force-update +helm upgrade --install \ +cert-manager jetstack/cert-manager \ +--namespace cert-manager \ +--create-namespace \ +--version v1.15.1 \ +--set 
crds.enabled=true \ +--set "extraArgs={--enable-gateway-api}" +``` + +
+ +With those values, a whoami service can be exposed with HTTPRoute on both HTTP and HTTPS + +```yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: whoami +spec: + replicas: 2 + selector: + matchLabels: + app: whoami + template: + metadata: + labels: + app: whoami + spec: + containers: + - name: whoami + image: traefik/whoami + +--- +apiVersion: v1 +kind: Service +metadata: + name: whoami +spec: + selector: + app: whoami + ports: + - protocol: TCP + port: 80 + +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: whoami +spec: + parentRefs: + - name: traefik-gateway + hostnames: + - whoami.docker.localhost + rules: + - matches: + - path: + type: Exact + value: / + + backendRefs: + - name: whoami + port: 80 + weight: 1 + +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} +``` + +Once it's applied, whoami should be accessible on https://whoami.docker.localhost/ + +
diff --git a/argocd-helm-charts/traefik/charts/traefik/README.md b/argocd-helm-charts/traefik/charts/traefik/README.md index f020bca09..cd963c199 100644 --- a/argocd-helm-charts/traefik/charts/traefik/README.md +++ b/argocd-helm-charts/traefik/charts/traefik/README.md @@ -62,6 +62,12 @@ Due to changes in API Group of Traefik CRDs from `containo.us` to `traefik.io`, helm install traefik traefik/traefik ``` +or: + +```bash +helm install traefik oci://ghcr.io/traefik/helm/traefik +``` + You can customize the install with a `values` file. There are some [EXAMPLES](./EXAMPLES.md) provided. Complete documentation on all available parameters is in the [default file](./traefik/values.yaml). diff --git a/argocd-helm-charts/traefik/charts/traefik/VALUES.md b/argocd-helm-charts/traefik/charts/traefik/VALUES.md index b4907ee00..1ca3baf22 100644 --- a/argocd-helm-charts/traefik/charts/traefik/VALUES.md +++ b/argocd-helm-charts/traefik/charts/traefik/VALUES.md @@ -1,6 +1,6 @@ # traefik -![Version: 29.0.0](https://img.shields.io/badge/Version-29.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v3.0.4](https://img.shields.io/badge/AppVersion-v3.0.4-informational?style=flat-square) +![Version: 30.0.2](https://img.shields.io/badge/Version-30.0.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v3.1.0](https://img.shields.io/badge/AppVersion-v3.1.0-informational?style=flat-square) A Traefik based Kubernetes ingress controller @@ -59,11 +59,18 @@ Kubernetes: `>=1.22.0-0` | experimental.plugins | object | `{}` | Enable traefik experimental plugins | | extraObjects | list | `[]` | Extra objects to deploy (value evaluated as a template) In some cases, it can avoid the need for additional, extended or adhoc deployments. 
See #595 for more details and traefik/tests/values/extra.yaml for example. | | gateway.annotations | string | `nil` | Additional gateway annotations (e.g. for cert-manager.io/issuer) | -| gateway.certificateRefs | string | `nil` | See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig) | | gateway.enabled | bool | `true` | When providers.kubernetesGateway.enabled, deploy a default gateway | +| gateway.listeners | object | `{"web":{"hostname":null,"namespacePolicy":null,"port":8000,"protocol":"HTTP"},"websecure":{"certificateRefs":null,"hostname":null,"mode":null,"namespacePolicy":null,"port":8443,"protocol":"HTTPS"}}` | Define listeners | +| gateway.listeners.web.hostname | string | `nil` | Optional hostname. See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) | +| gateway.listeners.web.namespacePolicy | string | `nil` | Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces | +| gateway.listeners.web.port | int | `8000` | Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. The port must match a port declared in ports section. | +| gateway.listeners.websecure.certificateRefs | string | `nil` | Add certificates for TLS or HTTPS protocols. See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig) | +| gateway.listeners.websecure.hostname | string | `nil` | Optional hostname. See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) | +| gateway.listeners.websecure.mode | string | `nil` | TLS behavior for the TLS session initiated by the client. See [TLSModeType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.TLSModeType). 
| +| gateway.listeners.websecure.namespacePolicy | string | `nil` | Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces) | +| gateway.listeners.websecure.port | int | `8443` | Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. The port must match a port declared in ports section. | | gateway.name | string | `nil` | Set a custom name to gateway | | gateway.namespace | string | `nil` | By default, Gateway is created in the same `Namespace` than Traefik. | -| gateway.namespacePolicy | string | `nil` | Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.FromNamespaces) | | gatewayClass.enabled | bool | `true` | When providers.kubernetesGateway.enabled and gateway.enabled, deploy a default gatewayClass | | gatewayClass.labels | string | `nil` | Additional gatewayClass labels (e.g. for filtering gateway objects by custom labels) | | gatewayClass.name | string | `nil` | Set a custom name to GatewayClass | @@ -98,6 +105,7 @@ Kubernetes: `>=1.22.0-0` | ingressRoute.dashboard.labels | object | `{}` | Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) | | ingressRoute.dashboard.matchRule | string | `"PathPrefix(`/dashboard`) || PathPrefix(`/api`)"` | The router match rule used for the dashboard ingressRoute | | ingressRoute.dashboard.middlewares | list | `[]` | Additional ingressRoute middlewares (e.g. for authentication) | +| ingressRoute.dashboard.services | list | `[{"kind":"TraefikService","name":"api@internal"}]` | The internal service used for the dashboard ingressRoute | | ingressRoute.dashboard.tls | object | `{}` | TLS options (e.g. secret containing certificate) | | ingressRoute.healthcheck.annotations | object | `{}` | Additional ingressRoute annotations (e.g. 
for kubernetes.io/ingress.class) | | ingressRoute.healthcheck.enabled | bool | `false` | Create an IngressRoute for the healthcheck probe | @@ -105,6 +113,7 @@ Kubernetes: `>=1.22.0-0` | ingressRoute.healthcheck.labels | object | `{}` | Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) | | ingressRoute.healthcheck.matchRule | string | `"PathPrefix(`/ping`)"` | The router match rule used for the healthcheck ingressRoute | | ingressRoute.healthcheck.middlewares | list | `[]` | Additional ingressRoute middlewares (e.g. for authentication) | +| ingressRoute.healthcheck.services | list | `[{"kind":"TraefikService","name":"ping@internal"}]` | The internal service used for the healthcheck ingressRoute | | ingressRoute.healthcheck.tls | object | `{}` | TLS options (e.g. secret containing certificate) | | instanceLabelOverride | string | `nil` | | | livenessProbe.failureThreshold | int | `3` | The number of consecutive failures allowed before considering the probe as failed. | @@ -272,4 +281,4 @@ Kubernetes: `>=1.22.0-0` | volumes | list | `[]` | Add volumes to the traefik pod. The volume name will be passed to tpl. This can be used to mount a cert pair or a configmap that holds a config.toml file. 
After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg: `additionalArguments: - "--providers.file.filename=/config/dynamic.toml" - "--ping" - "--ping.entrypoint=web"` | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/argocd-helm-charts/traefik/charts/traefik/templates/NOTES.txt b/argocd-helm-charts/traefik/charts/traefik/templates/NOTES.txt index e3b8bcaeb..a1a10bfb3 100644 --- a/argocd-helm-charts/traefik/charts/traefik/templates/NOTES.txt +++ b/argocd-helm-charts/traefik/charts/traefik/templates/NOTES.txt @@ -1,6 +1,6 @@ -Traefik Proxy {{ .Values.image.tag | default .Chart.AppVersion }} has been deployed successfully on {{ template "traefik.namespace" . }} namespace ! +{{ .Release.Name }} with {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} has been deployed successfully on {{ template "traefik.namespace" . }} namespace ! {{- if .Values.persistence }} {{- if and .Values.persistence.enabled (empty .Values.deployment.initContainer)}} diff --git a/argocd-helm-charts/traefik/charts/traefik/templates/_helpers.tpl b/argocd-helm-charts/traefik/charts/traefik/templates/_helpers.tpl index 7db0b03d9..2183f84ab 100644 --- a/argocd-helm-charts/traefik/charts/traefik/templates/_helpers.tpl +++ b/argocd-helm-charts/traefik/charts/traefik/templates/_helpers.tpl @@ -129,7 +129,19 @@ Renders a complete tree, even values that contains template. 
{{- end -}} {{- define "imageVersion" -}} -{{ (split "@" (default $.Chart.AppVersion $.Values.image.tag))._0 | replace "latest-" "" }} +{{/* +Traefik hub is based on v3.1 (v3.0 before v3.3.1) of traefik proxy, so this is a hack to avoid to much complexity in RBAC management which are +based on semverCompare +*/}} +{{- if $.Values.hub.token -}} +{{ if and (regexMatch "v[0-9]+.[0-9]+.[0-9]+" (default "" $.Values.image.tag)) (semverCompare "=v3.1.0-0" (.Values.image.tag | default .Chart.AppVersion)) + (semverCompare ">=v3.1.0-0" $version) + (.Values.providers.kubernetesGateway.enabled) (not .Values.rbac.namespaced) (and .Values.rbac.namespaced .Values.providers.kubernetesIngress.enabled (not .Values.providers.kubernetesIngress.disableIngressClassLookup)) }} @@ -15,7 +17,7 @@ metadata: rbac.authorization.k8s.io/aggregate-to-{{ . }}: "true" {{- end }} rules: - {{- if semverCompare ">=v3.1.0-0" (.Values.image.tag | default .Chart.AppVersion) }} + {{- if semverCompare ">=v3.1.0-0" $version }} - apiGroups: - "" resources: @@ -30,14 +32,14 @@ rules: - networking.k8s.io resources: - ingressclasses -{{- if not .Values.rbac.namespaced }} + {{- if not .Values.rbac.namespaced }} - ingresses -{{- end }} + {{- end }} verbs: - get - list - watch -{{- if (.Values.providers.kubernetesGateway).enabled }} + {{- if (.Values.providers.kubernetesGateway).enabled }} - apiGroups: - "" resources: @@ -59,9 +61,9 @@ rules: - gatewayclasses/status verbs: - update -{{- end }} -{{- if not .Values.rbac.namespaced }} - {{- if (semverCompare "=v3.1.0-0" ($.Values.image.tag | default $.Chart.AppVersion)) }} + {{- if (semverCompare ">=v3.1.0-0" $version) }} - apiGroups: - discovery.k8s.io resources: @@ -162,7 +176,7 @@ rules: verbs: - list - watch -{{- end }} + {{- end }} - apiGroups: - gateway.networking.k8s.io resources: @@ -184,74 +198,71 @@ rules: - tlsroutes/status verbs: - update -{{- end -}} -{{- end -}} -{{- if .Values.hub.token }} + {{- end }} + {{- if .Values.hub.token }} - apiGroups: - - 
hub.traefik.io + - coordination.k8s.io resources: - - accesscontrolpolicies - - apiaccesses - - apiportals - - apiratelimits - - apis - - apiversions + - leases verbs: + - get - list - watch - create - update - patch - delete - - get + {{- end }} +{{- /* not .Values.rbac.namespace */}} +{{- end }} +{{- if .Values.hub.token }} + {{- if or (semverCompare ">=v3.1.0-0" $version) .Values.hub.apimanagement.enabled }} - apiGroups: - "" resources: - - namespaces - - pods + - endpoints verbs: - list + - watch + {{- end }} - apiGroups: - "" resources: - namespaces + {{- if .Values.hub.apimanagement.enabled }} - pods - - nodes + {{- end }} verbs: - get - list + {{- if .Values.hub.apimanagement.enabled }} - watch + {{- end }} + {{- if .Values.hub.apimanagement.enabled }} - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - apiGroups: - - coordination.k8s.io + - hub.traefik.io resources: - - leases + - accesscontrolpolicies + - apiaccesses + - apiportals + - apiratelimits + - apis + - apiversions verbs: - - get - list - watch - create - update - patch - delete + - get - apiGroups: - "" resources: - - secrets + - events verbs: - - get - - list - - watch - - update - create - - delete - - deletecollection + - patch - apiGroups: - apps resources: @@ -260,15 +271,16 @@ rules: - get - list - watch + {{- if (semverCompare "=v3.1.0-0" (.Values.image.tag | default .Chart.AppVersion)) + (semverCompare ">=v3.1.0-0" $version) (not .Values.rbac.namespaced) (and .Values.rbac.namespaced .Values.providers.kubernetesIngress.enabled (not .Values.providers.kubernetesIngress.disableIngressClassLookup)) }} diff --git a/argocd-helm-charts/traefik/charts/traefik/templates/rbac/role.yaml b/argocd-helm-charts/traefik/charts/traefik/templates/rbac/role.yaml index 5a546ebee..8cd9837cf 100644 --- a/argocd-helm-charts/traefik/charts/traefik/templates/rbac/role.yaml +++ b/argocd-helm-charts/traefik/charts/traefik/templates/rbac/role.yaml @@ -1,3 +1,4 @@ +{{- $version := include 
"imageVersion" $ }} {{- $ingressNamespaces := concat (include "traefik.namespace" . | list) .Values.providers.kubernetesIngress.namespaces -}} {{- $CRDNamespaces := concat (include "traefik.namespace" . | list) .Values.providers.kubernetesCRD.namespaces -}} {{- $gatewayNamespaces := concat (include "traefik.namespace" . | list) ((.Values.providers.kubernetesGateway).namespaces) -}} @@ -14,7 +15,7 @@ metadata: labels: {{- include "traefik.labels" $ | nindent 4 }} rules: - {{- if (semverCompare "=v3.1.0-0" ($.Values.image.tag | default $.Chart.AppVersion)) }} - - apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch -{{- end }} - apiGroups: - gateway.networking.k8s.io resources: - - gatewayclasses - gateways - httproutes - referencegrants @@ -156,26 +132,11 @@ rules: - update {{- end -}} {{- if $.Values.hub.token }} - - apiGroups: - - hub.traefik.io - resources: - - accesscontrolpolicies - - apiaccesses - - apiportals - - apiratelimits - - apis - - apiversions - verbs: - - list - - watch - - create - - update - - patch - - delete - - get - apiGroups: - "" resources: + - services + - endpoints - namespaces - pods verbs: @@ -183,12 +144,13 @@ rules: - list - watch - apiGroups: - - "" + - discovery.k8s.io resources: - - events + - endpointslices verbs: - - create - - patch + - get + - list + - watch - apiGroups: - coordination.k8s.io resources: @@ -201,35 +163,6 @@ rules: - update - patch - delete - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - - update - - create - - delete - - deletecollection - - apiGroups: - - apps - resources: - - replicasets - verbs: - - get - - list - - watch - - apiGroups: - - extensions - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -{{- end -}} +{{- end }} {{- end -}} {{- end -}} diff --git a/argocd-helm-charts/traefik/charts/traefik/templates/requirements.yaml b/argocd-helm-charts/traefik/charts/traefik/templates/requirements.yaml 
index 79f1f4477..0175ca0be 100644 --- a/argocd-helm-charts/traefik/charts/traefik/templates/requirements.yaml +++ b/argocd-helm-charts/traefik/charts/traefik/templates/requirements.yaml @@ -12,3 +12,9 @@ {{- if and (.Values.providers.kubernetesGateway).enabled (and (semverCompare "<3.1.0-rc3" $version) (not .Values.experimental.kubernetesGateway.enabled)) }} {{- fail "ERROR: Before traefik v3.1.0-rc3, kubernetesGateway is experimental. Enable it by setting experimental.kubernetesGateway.enabled to true" -}} {{- end }} + +{{- if .Values.hub.token }} + {{- if and .Values.hub.apimanagement.enabled (and .Values.rbac.enabled .Values.rbac.namespaced) }} + {{- fail "ERROR: Currently Traefik Hub doesn't support namespaced RBACs" -}} + {{- end }} +{{- end }} diff --git a/argocd-helm-charts/traefik/charts/traefik/values.yaml b/argocd-helm-charts/traefik/charts/traefik/values.yaml index c8bfd5be1..83b6d98bf 100644 --- a/argocd-helm-charts/traefik/charts/traefik/values.yaml +++ b/argocd-helm-charts/traefik/charts/traefik/values.yaml @@ -134,14 +134,36 @@ gateway: enabled: true # -- Set a custom name to gateway name: - # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.FromNamespaces) - namespacePolicy: - # -- See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig) - certificateRefs: # -- By default, Gateway is created in the same `Namespace` than Traefik. namespace: # -- Additional gateway annotations (e.g. for cert-manager.io/issuer) annotations: + # -- Define listeners + listeners: + web: + # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. + # The port must match a port declared in ports section. + port: 8000 + # -- Optional hostname. 
See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) + hostname: + # Specify expected protocol on this listener. See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType) + protocol: HTTP + # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces + namespacePolicy: + websecure: + # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. + # The port must match a port declared in ports section. + port: 8443 + # -- Optional hostname. See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) + hostname: + # Specify expected protocol on this listener See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType) + protocol: HTTPS + # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces) + namespacePolicy: + # -- Add certificates for TLS or HTTPS protocols. See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig) + certificateRefs: + # -- TLS behavior for the TLS session initiated by the client. See [TLSModeType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.TLSModeType). 
+ mode: gatewayClass: # -- When providers.kubernetesGateway.enabled and gateway.enabled, deploy a default gatewayClass @@ -161,6 +183,10 @@ ingressRoute: labels: {} # -- The router match rule used for the dashboard ingressRoute matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`) + # -- The internal service used for the dashboard ingressRoute + services: + - name: api@internal + kind: TraefikService # -- Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure). # By default, it's using traefik entrypoint, which is not exposed. # /!\ Do not expose your dashboard without any protection over the internet /!\ @@ -178,6 +204,10 @@ ingressRoute: labels: {} # -- The router match rule used for the healthcheck ingressRoute matchRule: PathPrefix(`/ping`) + # -- The internal service used for the healthcheck ingressRoute + services: + - name: ping@internal + kind: TraefikService # -- Specify the allowed entrypoints to use for the healthcheck ingress route, (e.g. traefik, web, websecure). # By default, it's using traefik entrypoint, which is not exposed. entryPoints: ["traefik"] @@ -307,9 +337,12 @@ logs: # -- Set [logs format](https://doc.traefik.io/traefik/observability/logs/#format) # @default common format: - # By default, the level is set to ERROR. + # By default, the level is set to INFO. # -- Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO. 
level: INFO + # + # filePath: "/var/log/traefik/traefik.log + # noColor: true access: # -- To enable access logs enabled: false From 9983e997c5f2d461a5d2881aab50ec3746d4e680 Mon Sep 17 00:00:00 2001 From: Hritik Batra Date: Wed, 31 Jul 2024 20:56:59 +0530 Subject: [PATCH 25/25] [CI] Helm Chart Update velero --- CHANGELOG.md | 1 + argocd-helm-charts/velero/Chart.lock | 6 +++--- argocd-helm-charts/velero/Chart.yaml | 2 +- .../velero/charts/velero/Chart.yaml | 2 +- .../charts/velero/templates/deployment.yaml | 21 +++++++++++++++++++ .../templates/serviceaccount-server.yaml | 8 +++---- .../velero/charts/velero/values.yaml | 11 ++++++++++ 7 files changed, 42 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86b712ba6..668fcd626 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,3 +29,4 @@ All releases and the changes included in them (pulled from git commits added sin - Updated: sonarqube from version 10.6.0+3033 to 10.6.1+3163 - Updated: tigera-operator from version v3.28.0 to v3.28.1 - Updated: traefik from version 29.0.0 to 30.0.2 +- Updated: velero from version 7.1.0 to 7.1.4 diff --git a/argocd-helm-charts/velero/Chart.lock b/argocd-helm-charts/velero/Chart.lock index 92c47c116..d1dafc461 100644 --- a/argocd-helm-charts/velero/Chart.lock +++ b/argocd-helm-charts/velero/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: velero repository: https://vmware-tanzu.github.io/helm-charts - version: 7.1.0 -digest: sha256:cbe2e0ff8def6233ad51a947f20faac0ca9a90c605bcb1c4927c5e27e533e6dc -generated: "2024-07-09T02:59:06.059457745+05:30" + version: 7.1.4 +digest: sha256:f013c5f18b6005c96e2d2467f679e0481a81baf74a86c1268256b057d095464d +generated: "2024-07-31T20:56:42.120214436+05:30" diff --git a/argocd-helm-charts/velero/Chart.yaml b/argocd-helm-charts/velero/Chart.yaml index 050ecde7f..2d9730d63 100644 --- a/argocd-helm-charts/velero/Chart.yaml +++ b/argocd-helm-charts/velero/Chart.yaml @@ -4,6 +4,6 @@ name: velero version: 1.0.0 dependencies: - name: 
velero - version: 7.1.0 + version: 7.1.4 repository: https://vmware-tanzu.github.io/helm-charts #repository: "oci://ghcr.io/Obmondo" diff --git a/argocd-helm-charts/velero/charts/velero/Chart.yaml b/argocd-helm-charts/velero/charts/velero/Chart.yaml index db4b67045..ae3eb0d9b 100644 --- a/argocd-helm-charts/velero/charts/velero/Chart.yaml +++ b/argocd-helm-charts/velero/charts/velero/Chart.yaml @@ -16,4 +16,4 @@ maintainers: name: velero sources: - https://github.com/vmware-tanzu/velero -version: 7.1.0 +version: 7.1.4 diff --git a/argocd-helm-charts/velero/charts/velero/templates/deployment.yaml b/argocd-helm-charts/velero/charts/velero/templates/deployment.yaml index 448e8bf7f..93f80619a 100644 --- a/argocd-helm-charts/velero/charts/velero/templates/deployment.yaml +++ b/argocd-helm-charts/velero/charts/velero/templates/deployment.yaml @@ -166,6 +166,27 @@ spec: {{- with .namespace }} - --namespace={{ . }} {{- end }} + {{- with .repositoryMaintenanceJob }} + {{- with .requests }} + {{- with .cpu }} + - --maintenance-job-cpu-request={{ . }} + {{- end }} + {{- with .memory }} + - --maintenance-job-mem-request={{ . }} + {{- end }} + {{- end }} + {{- with .limits }} + {{- with .cpu }} + - --maintenance-job-cpu-limit={{ . }} + {{- end }} + {{- with .memory }} + - --maintenance-job-mem-limit={{ . }} + {{- end }} + {{- end }} + {{- with .latestJobsCount }} + - --keep-latest-maintenance-jobs={{ . }} + {{- end }} + {{- end }} {{- with .extraArgs }} ### User-supplied overwrite flags {{- toYaml . | nindent 12 }} diff --git a/argocd-helm-charts/velero/charts/velero/templates/serviceaccount-server.yaml b/argocd-helm-charts/velero/charts/velero/templates/serviceaccount-server.yaml index bfaf0d8b3..76feca056 100644 --- a/argocd-helm-charts/velero/charts/velero/templates/serviceaccount-server.yaml +++ b/argocd-helm-charts/velero/charts/velero/templates/serviceaccount-server.yaml @@ -17,9 +17,9 @@ metadata: {{- toYaml . 
| nindent 4 }} {{- end }} {{- if .Values.serviceAccount.server.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.serviceAccount.server.imagePullSecrets }} - - name: {{ . }} - {{- end }} +imagePullSecrets: +{{- range .Values.serviceAccount.server.imagePullSecrets }} + - name: {{ . }} +{{- end }} {{- end }} {{- end }} diff --git a/argocd-helm-charts/velero/charts/velero/values.yaml b/argocd-helm-charts/velero/charts/velero/values.yaml index 590eef90c..7a9c18ea1 100644 --- a/argocd-helm-charts/velero/charts/velero/values.yaml +++ b/argocd-helm-charts/velero/charts/velero/values.yaml @@ -440,6 +440,17 @@ configuration: # Comma separated list of velero feature flags. default: empty # features: EnableCSI features: + # Resource requests/limits to specify for the repository-maintenance job. Optional. + # https://velero.io/docs/v1.14/repository-maintenance/#resource-limitation + repositoryMaintenanceJob: + requests: + # cpu: 500m + # memory: 512Mi + limits: + # cpu: 1000m + # memory: 1024Mi + # Number of latest maintenance jobs to keep for each repository + latestJobsCount: 3 # `velero server` default: velero namespace: # additional command-line arguments that will be passed to the `velero server`