From e9e3c62db85b0e7cec9f04f20f8e11feef556bdd Mon Sep 17 00:00:00 2001 From: RainbowMango Date: Fri, 11 Oct 2024 11:05:13 +0800 Subject: [PATCH] Update file line terminators to LF Signed-off-by: RainbowMango --- .github/workflows/ci-image-scanning.yaml | 124 +++---- .../v1alpha1/DaemonSet/customizations.yaml | 332 +++++++++--------- .../v1/Policy/testdata/desired-policy.yaml | 40 +-- .../v1/Policy/testdata/observed-policy.yaml | 172 ++++----- .../v1/Policy/testdata/status-file.yaml | 238 ++++++------- 5 files changed, 453 insertions(+), 453 deletions(-) diff --git a/.github/workflows/ci-image-scanning.yaml b/.github/workflows/ci-image-scanning.yaml index 24e51df3214b..acfece19404f 100644 --- a/.github/workflows/ci-image-scanning.yaml +++ b/.github/workflows/ci-image-scanning.yaml @@ -1,62 +1,62 @@ -name: image-scanning -on: - push: - # Exclude branches created by Dependabot to avoid triggering current workflow - # for PRs initiated by Dependabot. - branches-ignore: - - 'dependabot/**' -permissions: - contents: read -jobs: - use-trivy-to-scan-image: - permissions: - security-events: write # for github/codeql-action/upload-sarif to upload SARIF results - name: image-scanning - if: ${{ github.repository == 'karmada-io/karmada' }} - runs-on: ubuntu-22.04 - strategy: - fail-fast: false - matrix: - target: - - karmada-controller-manager - - karmada-scheduler - - karmada-descheduler - - karmada-webhook - - karmada-agent - - karmada-scheduler-estimator - - karmada-interpreter-webhook-example - - karmada-aggregated-apiserver - - karmada-search - - karmada-operator - - karmada-metrics-adapter - steps: - - name: checkout code - uses: actions/checkout@v4 - - name: install Go - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - name: Build an image from Dockerfile - run: | - export VERSION="latest" - export REGISTRY="docker.io/karmada" - make image-${{ matrix.target }} - - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@0.24.0 - with: - image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' - format: 'sarif' - ignore-unfixed: true - vuln-type: 'os,library' - output: 'trivy-results.sarif' - - name: display scan results - uses: aquasecurity/trivy-action@0.24.0 - with: - image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' - format: 'table' - ignore-unfixed: true - vuln-type: 'os,library' - - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: 'trivy-results.sarif' +name: image-scanning +on: + push: + # Exclude branches created by Dependabot to avoid triggering current workflow + # for PRs initiated by Dependabot. 
+ branches-ignore: + - 'dependabot/**' +permissions: + contents: read +jobs: + use-trivy-to-scan-image: + permissions: + security-events: write # for github/codeql-action/upload-sarif to upload SARIF results + name: image-scanning + if: ${{ github.repository == 'karmada-io/karmada' }} + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + target: + - karmada-controller-manager + - karmada-scheduler + - karmada-descheduler + - karmada-webhook + - karmada-agent + - karmada-scheduler-estimator + - karmada-interpreter-webhook-example + - karmada-aggregated-apiserver + - karmada-search + - karmada-operator + - karmada-metrics-adapter + steps: + - name: checkout code + uses: actions/checkout@v4 + - name: install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Build an image from Dockerfile + run: | + export VERSION="latest" + export REGISTRY="docker.io/karmada" + make image-${{ matrix.target }} + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@0.24.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' + format: 'sarif' + ignore-unfixed: true + vuln-type: 'os,library' + output: 'trivy-results.sarif' + - name: display scan results + uses: aquasecurity/trivy-action@0.24.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' + format: 'table' + ignore-unfixed: true + vuln-type: 'os,library' + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: 'trivy-results.sarif' diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml index e01bbde7d282..8e4dd65cd984 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml @@ -1,166 +1,166 @@ -apiVersion: config.karmada.io/v1alpha1 -kind: ResourceInterpreterCustomization -metadata: - name: declarative-configuration-daemonset -spec: - target: - apiVersion: apps.kruise.io/v1alpha1 - kind: DaemonSet - customizations: - statusAggregation: - luaScript: > - function AggregateStatus(desiredObj, statusItems) - if desiredObj.status == nil then - desiredObj.status = {} - end - if desiredObj.metadata.generation == nil then - desiredObj.metadata.generation = 0 - end - - if desiredObj.status.observedGeneration == nil then - desiredObj.status.observedGeneration = 0 - end - - -- Initialize status fields if status doest not exist - -- If the DaemonSet is not spread to any cluster, its status also should be aggregated - if statusItems == nil then - desiredObj.status.observedGeneration = desiredObj.metadata.generation - desiredObj.status.currentNumberScheduled = 0 - desiredObj.status.numberMisscheduled = 0 - desiredObj.status.desiredNumberScheduled = 0 - desiredObj.status.numberReady = 0 - desiredObj.status.updatedNumberScheduled = 0 - desiredObj.status.numberAvailable = 0 - desiredObj.status.numberUnavailable = 0 - desiredObj.status.daemonSetHash = 0 - return desiredObj - end - - local generation = desiredObj.metadata.generation - local observedGeneration = desiredObj.status.observedGeneration - local currentNumberScheduled = 0 - local numberMisscheduled = 0 - local desiredNumberScheduled = 0 - local numberReady = 0 - 
local updatedNumberScheduled = 0 - local numberAvailable = 0 - local numberUnavailable = 0 - local daemonSetHash = 0 - - -- Count all members that their status is updated to the latest generation - local observedResourceTemplateGenerationCount = 0 - for i = 1, #statusItems do - if statusItems[i].status ~= nil and statusItems[i].status.currentNumberScheduled ~= nil then - currentNumberScheduled = currentNumberScheduled + statusItems[i].status.currentNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberMisscheduled ~= nil then - numberMisscheduled = numberMisscheduled + statusItems[i].status.numberMisscheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.desiredNumberScheduled ~= nil then - desiredNumberScheduled = desiredNumberScheduled + statusItems[i].status.desiredNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberReady ~= nil then - numberReady = numberReady + statusItems[i].status.numberReady - end - if statusItems[i].status ~= nil and statusItems[i].status.updatedNumberScheduled ~= nil then - updatedNumberScheduled = updatedNumberScheduled + statusItems[i].status.updatedNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberAvailable ~= nil then - numberAvailable = numberAvailable + statusItems[i].status.numberAvailable - end - if statusItems[i].status ~= nil and statusItems[i].status.numberUnavailable ~= nil then - numberUnavailable = numberUnavailable + statusItems[i].status.numberUnavailable - end - if statusItems[i].status ~= nil and statusItems[i].status.daemonSetHash ~= nil and statusItems[i].status.daemonSetHash ~= '' then - daemonSetHash = statusItems[i].status.daemonSetHash - end - - -- Check if the member's status is updated to the latest generation - local resourceTemplateGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.resourceTemplateGeneration ~= nil then - resourceTemplateGeneration = statusItems[i].status.resourceTemplateGeneration - end - local memberGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.generation ~= nil then - memberGeneration = statusItems[i].status.generation - end - local memberObservedGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.observedGeneration ~= nil then - memberObservedGeneration = statusItems[i].status.observedGeneration - end - if resourceTemplateGeneration == generation and memberGeneration == memberObservedGeneration then - observedResourceTemplateGenerationCount = observedResourceTemplateGenerationCount + 1 - end - end - - -- Update the observed generation based on the observedResourceTemplateGenerationCount - if observedResourceTemplateGenerationCount == #statusItems then - desiredObj.status.observedGeneration = generation - else - desiredObj.status.observedGeneration = observedGeneration - end - - desiredObj.status.currentNumberScheduled = currentNumberScheduled - desiredObj.status.numberMisscheduled = numberMisscheduled - desiredObj.status.desiredNumberScheduled = desiredNumberScheduled - desiredObj.status.numberReady = numberReady - desiredObj.status.updatedNumberScheduled = updatedNumberScheduled - desiredObj.status.numberAvailable = numberAvailable - desiredObj.status.numberUnavailable = numberUnavailable - desiredObj.status.daemonSetHash = daemonSetHash - return desiredObj - end - statusReflection: - luaScript: > - function ReflectStatus(observedObj) - local status = {} - if observedObj == nil or observedObj.status == nil then - 
return status - end - status.observedGeneration = observedObj.status.observedGeneration - status.currentNumberScheduled = observedObj.status.currentNumberScheduled - status.numberMisscheduled = observedObj.status.numberMisscheduled - status.desiredNumberScheduled = observedObj.status.desiredNumberScheduled - status.numberReady = observedObj.status.numberReady - status.updatedNumberScheduled = observedObj.status.updatedNumberScheduled - status.numberAvailable = observedObj.status.numberAvailable - status.numberUnavailable = observedObj.status.numberUnavailable - status.daemonSetHash = observedObj.status.daemonSetHash - - -- handle member resource generation report - if observedObj.metadata == nil then - return status - end - status.generation = observedObj.metadata.generation - - -- handle resource template generation report - if observedObj.metadata.annotations == nil then - return status - end - local resourceTemplateGeneration = tonumber(observedObj.metadata.annotations["resourcetemplate.karmada.io/generation"]) - if resourceTemplateGeneration ~= nil then - status.resourceTemplateGeneration = resourceTemplateGeneration - end - return status - end - healthInterpretation: - luaScript: > - function InterpretHealth(observedObj) - if observedObj.status.observedGeneration ~= observedObj.metadata.generation then - return false - end - if observedObj.status.updatedNumberScheduled < observedObj.status.desiredNumberScheduled then - return false - end - if observedObj.status.numberAvailable < observedObj.status.updatedNumberScheduled then - return false - end - return true - end - dependencyInterpretation: - luaScript: > - local kube = require("kube") - function GetDependencies(desiredObj) - refs = kube.getPodDependencies(desiredObj.spec.template, desiredObj.metadata.namespace) - return refs - end +apiVersion: config.karmada.io/v1alpha1 +kind: ResourceInterpreterCustomization +metadata: + name: declarative-configuration-daemonset +spec: + target: + apiVersion: apps.kruise.io/v1alpha1 + kind: DaemonSet + customizations: + statusAggregation: + luaScript: > + function AggregateStatus(desiredObj, statusItems) + if desiredObj.status == nil then + desiredObj.status = {} + end + if desiredObj.metadata.generation == nil then + desiredObj.metadata.generation = 0 + end + + if desiredObj.status.observedGeneration == nil then + desiredObj.status.observedGeneration = 0 + end + + -- Initialize status fields if status doest not exist + -- If the DaemonSet is not spread to any cluster, its status also should be aggregated + if statusItems == nil then + desiredObj.status.observedGeneration = desiredObj.metadata.generation + desiredObj.status.currentNumberScheduled = 0 + desiredObj.status.numberMisscheduled = 0 + desiredObj.status.desiredNumberScheduled = 0 + desiredObj.status.numberReady = 0 + desiredObj.status.updatedNumberScheduled = 0 + desiredObj.status.numberAvailable = 0 + desiredObj.status.numberUnavailable = 0 + desiredObj.status.daemonSetHash = 0 + return desiredObj + end + + local generation = desiredObj.metadata.generation + local observedGeneration = desiredObj.status.observedGeneration + local currentNumberScheduled = 0 + local numberMisscheduled = 0 + local desiredNumberScheduled = 0 + local numberReady = 0 + local updatedNumberScheduled = 0 + local numberAvailable = 0 + local numberUnavailable = 0 + local daemonSetHash = 0 + + -- Count all members that their status is updated to the latest generation + local observedResourceTemplateGenerationCount = 0 + for i = 1, #statusItems do + if 
statusItems[i].status ~= nil and statusItems[i].status.currentNumberScheduled ~= nil then + currentNumberScheduled = currentNumberScheduled + statusItems[i].status.currentNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberMisscheduled ~= nil then + numberMisscheduled = numberMisscheduled + statusItems[i].status.numberMisscheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.desiredNumberScheduled ~= nil then + desiredNumberScheduled = desiredNumberScheduled + statusItems[i].status.desiredNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberReady ~= nil then + numberReady = numberReady + statusItems[i].status.numberReady + end + if statusItems[i].status ~= nil and statusItems[i].status.updatedNumberScheduled ~= nil then + updatedNumberScheduled = updatedNumberScheduled + statusItems[i].status.updatedNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberAvailable ~= nil then + numberAvailable = numberAvailable + statusItems[i].status.numberAvailable + end + if statusItems[i].status ~= nil and statusItems[i].status.numberUnavailable ~= nil then + numberUnavailable = numberUnavailable + statusItems[i].status.numberUnavailable + end + if statusItems[i].status ~= nil and statusItems[i].status.daemonSetHash ~= nil and statusItems[i].status.daemonSetHash ~= '' then + daemonSetHash = statusItems[i].status.daemonSetHash + end + + -- Check if the member's status is updated to the latest generation + local resourceTemplateGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.resourceTemplateGeneration ~= nil then + resourceTemplateGeneration = statusItems[i].status.resourceTemplateGeneration + end + local memberGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.generation ~= nil then + memberGeneration = statusItems[i].status.generation + end + local memberObservedGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.observedGeneration ~= nil then + memberObservedGeneration = statusItems[i].status.observedGeneration + end + if resourceTemplateGeneration == generation and memberGeneration == memberObservedGeneration then + observedResourceTemplateGenerationCount = observedResourceTemplateGenerationCount + 1 + end + end + + -- Update the observed generation based on the observedResourceTemplateGenerationCount + if observedResourceTemplateGenerationCount == #statusItems then + desiredObj.status.observedGeneration = generation + else + desiredObj.status.observedGeneration = observedGeneration + end + + desiredObj.status.currentNumberScheduled = currentNumberScheduled + desiredObj.status.numberMisscheduled = numberMisscheduled + desiredObj.status.desiredNumberScheduled = desiredNumberScheduled + desiredObj.status.numberReady = numberReady + desiredObj.status.updatedNumberScheduled = updatedNumberScheduled + desiredObj.status.numberAvailable = numberAvailable + desiredObj.status.numberUnavailable = numberUnavailable + desiredObj.status.daemonSetHash = daemonSetHash + return desiredObj + end + statusReflection: + luaScript: > + function ReflectStatus(observedObj) + local status = {} + if observedObj == nil or observedObj.status == nil then + return status + end + status.observedGeneration = observedObj.status.observedGeneration + status.currentNumberScheduled = observedObj.status.currentNumberScheduled + status.numberMisscheduled = observedObj.status.numberMisscheduled + status.desiredNumberScheduled = 
observedObj.status.desiredNumberScheduled + status.numberReady = observedObj.status.numberReady + status.updatedNumberScheduled = observedObj.status.updatedNumberScheduled + status.numberAvailable = observedObj.status.numberAvailable + status.numberUnavailable = observedObj.status.numberUnavailable + status.daemonSetHash = observedObj.status.daemonSetHash + + -- handle member resource generation report + if observedObj.metadata == nil then + return status + end + status.generation = observedObj.metadata.generation + + -- handle resource template generation report + if observedObj.metadata.annotations == nil then + return status + end + local resourceTemplateGeneration = tonumber(observedObj.metadata.annotations["resourcetemplate.karmada.io/generation"]) + if resourceTemplateGeneration ~= nil then + status.resourceTemplateGeneration = resourceTemplateGeneration + end + return status + end + healthInterpretation: + luaScript: > + function InterpretHealth(observedObj) + if observedObj.status.observedGeneration ~= observedObj.metadata.generation then + return false + end + if observedObj.status.updatedNumberScheduled < observedObj.status.desiredNumberScheduled then + return false + end + if observedObj.status.numberAvailable < observedObj.status.updatedNumberScheduled then + return false + end + return true + end + dependencyInterpretation: + luaScript: > + local kube = require("kube") + function GetDependencies(desiredObj) + refs = kube.getPodDependencies(desiredObj.spec.template, desiredObj.metadata.namespace) + return refs + end diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml index f69acb336662..3f6fe0b0be33 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml @@ -1,20 +1,20 @@ -apiVersion: kyverno.io/v1 -kind: Policy -metadata: - name: sample - namespace: test-policy -spec: - validationFailureAction: Enforce - rules: - - name: require-pod-purpose-label - match: - any: - - resources: - kinds: - - Pod - validate: - message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." - pattern: - metadata: - labels: - purpose: production +apiVersion: kyverno.io/v1 +kind: Policy +metadata: + name: sample + namespace: test-policy +spec: + validationFailureAction: Enforce + rules: + - name: require-pod-purpose-label + match: + any: + - resources: + kinds: + - Pod + validate: + message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." 
+ pattern: + metadata: + labels: + purpose: production diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml index b972ee9804c4..64050a5e4237 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml @@ -1,86 +1,86 @@ -apiVersion: kyverno.io/v1 -kind: Policy -metadata: - name: sample - namespace: test-policy -spec: - validationFailureAction: Enforce - rules: - - name: require-pod-purpose-label - match: - any: - - resources: - kinds: - - Pod - validate: - message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." - pattern: - metadata: - labels: - purpose: production -status: - autogen: - rules: - - exclude: - resources: {} - generate: - clone: {} - cloneList: {} - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - resources: {} - mutate: {} - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on all - new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: - resources: {} - generate: - clone: {} - cloneList: {} - match: - any: - - resources: - kinds: - - CronJob - resources: {} - mutate: {} - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on all - new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 +apiVersion: kyverno.io/v1 +kind: Policy +metadata: + name: sample + namespace: test-policy +spec: + validationFailureAction: Enforce + rules: + - name: require-pod-purpose-label + match: + any: + - resources: + kinds: + - Pod + validate: + message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." + pattern: + metadata: + labels: + purpose: production +status: + autogen: + rules: + - exclude: + resources: {} + generate: + clone: {} + cloneList: {} + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + resources: {} + mutate: {} + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on all + new Pod in test-policy Namespace. + pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: + resources: {} + generate: + clone: {} + cloneList: {} + match: + any: + - resources: + kinds: + - CronJob + resources: {} + mutate: {} + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on all + new Pod in test-policy Namespace. 
+ pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml index 17cda3cd42db..0f16a7b8ce39 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml @@ -1,119 +1,119 @@ -applied: true -clusterName: member2 -health: Healthy -status: - autogen: - rules: - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - CronJob - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 ---- -applied: true -clusterName: member3 -health: Healthy -status: - autogen: - rules: - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - CronJob - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 +applied: true +clusterName: member2 +health: Healthy +status: + autogen: + rules: + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. 
+ pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - CronJob + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 +--- +applied: true +clusterName: member3 +health: Healthy +status: + autogen: + rules: + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - CronJob + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0
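
Note: this patch changes only line terminators (CRLF to LF); the content of every line is otherwise untouched, which is why the diff shows each file fully rewritten. To keep the terminators from regressing, the repository can pin them in .gitattributes (not included in this patch), e.g. `* text=auto eol=lf`; `git ls-files --eol` then reports the index and working-tree terminators of every tracked file, making any remaining CRLF files easy to spot.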
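
The statusAggregation script in the DaemonSet customization above sums each numeric status field across member clusters, guarding every access with a nil check because a member may not have reported status yet. The standalone sketch below reproduces that pattern for two of the fields with hypothetical member items; it assumes a plain Lua 5.x interpreter, whereas Karmada evaluates the real script inside its embedded Lua runtime, so treat it as illustration only:

    -- Sketch of the nil-guarded summation used by AggregateStatus above,
    -- reduced to two fields. statusItems mimics the shape Karmada passes in;
    -- the concrete numbers are hypothetical.
    local statusItems = {
      { status = { desiredNumberScheduled = 2, numberReady = 2 } },
      { status = { desiredNumberScheduled = 3, numberReady = 1 } },
      { },  -- a member that has not reported status yet
    }

    local desiredNumberScheduled = 0
    local numberReady = 0
    for i = 1, #statusItems do
      if statusItems[i].status ~= nil and statusItems[i].status.desiredNumberScheduled ~= nil then
        desiredNumberScheduled = desiredNumberScheduled + statusItems[i].status.desiredNumberScheduled
      end
      if statusItems[i].status ~= nil and statusItems[i].status.numberReady ~= nil then
        numberReady = numberReady + statusItems[i].status.numberReady
      end
    end

    print(desiredNumberScheduled, numberReady)  -- 5	3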
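
Likewise, the healthInterpretation script reports a member DaemonSet healthy only when its status has caught up to the latest generation and every updated Pod is available. The harness below copies that function verbatim from the customization and runs it against two hypothetical member statuses, under the same plain-Lua assumption as above:

    -- InterpretHealth copied from the customization above; the two mock
    -- objects are hypothetical member DaemonSet states.
    local function InterpretHealth(observedObj)
      if observedObj.status.observedGeneration ~= observedObj.metadata.generation then
        return false
      end
      if observedObj.status.updatedNumberScheduled < observedObj.status.desiredNumberScheduled then
        return false
      end
      if observedObj.status.numberAvailable < observedObj.status.updatedNumberScheduled then
        return false
      end
      return true
    end

    -- A member whose rollout has fully converged: healthy.
    local healthy = {
      metadata = { generation = 2 },
      status = {
        observedGeneration = 2,
        desiredNumberScheduled = 3,
        updatedNumberScheduled = 3,
        numberAvailable = 3,
      },
    }

    -- A member still rolling out the new Pod template: unhealthy.
    local rollingOut = {
      metadata = { generation = 2 },
      status = {
        observedGeneration = 2,
        desiredNumberScheduled = 3,
        updatedNumberScheduled = 1,
        numberAvailable = 1,
      },
    }

    print(InterpretHealth(healthy))     -- true
    print(InterpretHealth(rollingOut))  -- false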