diff --git a/.github/actions/run-tests/action.yaml b/.github/actions/run-tests/action.yaml new file mode 100644 index 000000000..9edeebb66 --- /dev/null +++ b/.github/actions/run-tests/action.yaml @@ -0,0 +1,16 @@ +name: "Runs E2E Tests" +description: "Runs E2E tests using chainsaw" +inputs: + tests: + description: "Test regex" + required: true +runs: + using: "composite" + steps: + - name: Install Chainsaw + uses: kyverno/action-install-chainsaw@5d00c353f61f44f3b492c673420202d1b1374c3f # v0.2.6 + - name: Test with Chainsaw + shell: bash + run: | + set -e + chainsaw test --config .chainsaw.yaml --include-test-regex '^chainsaw$/${{ inputs.tests }}' --no-color=false diff --git a/.github/actions/setup-env/action.yaml b/.github/actions/setup-env/action.yaml new file mode 100644 index 000000000..8bdbd708a --- /dev/null +++ b/.github/actions/setup-env/action.yaml @@ -0,0 +1,51 @@ +name: "Setup Environment for E2E Tests" +description: "Sets up the environment for the E2E workflows" +inputs: + k8s-version: + description: "Kubernetes version" + required: true +runs: + using: "composite" + steps: + - name: Setup Go + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + go-version: ~1.21.1 + - name: Install Tools + shell: bash + run: | + set -e + curl -LO "https://dl.k8s.io/release/${{ inputs.k8s-version }}/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + - name: Install kind + shell: bash + run: | + set -e + # For AMD64 / x86_64 + [ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 + # For ARM64 + [ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-arm64 + chmod +x ./kind + sudo mv ./kind /usr/local/bin/kind + - name: Install latest Kyverno CLI + uses: kyverno/action-install-cli@fcee92fca5c883169ef9927acf543e0b5fc58289 # v0.2.0 + - name: Create kind cluster + shell: bash + run: | + set -e + kind create cluster 
--image kindest/node:${{ inputs.k8s-version }} --config ./.github/kind.yml + - name: Install latest kyverno + shell: bash + run: | + set -e + kubectl create -f https://github.com/kyverno/kyverno/raw/main/config/install-latest-testing.yaml + - name: Wait for kyverno ready + shell: bash + run: | + set -e + kubectl wait --namespace kyverno --for=condition=ready pod --selector '!job-name' --timeout=60s + - name: Install CRDs + shell: bash + run: | + set -e + kubectl apply -f ./.chainsaw/crds diff --git a/.github/kind.yml b/.github/kind.yml index 9438061e5..1f6e2eee6 100644 --- a/.github/kind.yml +++ b/.github/kind.yml @@ -1,5 +1,7 @@ kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 +featureGates: + ProcMountType: true kubeadmConfigPatches: - |- kind: ClusterConfiguration diff --git a/.github/workflows/cel-test.yml b/.github/workflows/cel-test.yml new file mode 100644 index 000000000..ec7564140 --- /dev/null +++ b/.github/workflows/cel-test.yml @@ -0,0 +1,65 @@ +name: E2E Tests - CEL + +permissions: {} + +on: + workflow_dispatch: {} + pull_request: + branches: + - 'main' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + chainsaw: + strategy: + fail-fast: false + matrix: + k8s-version: + - name: v1.25 + version: v1.25.16 + - name: v1.26 + version: v1.26.14 + - name: v1.27 + version: v1.27.11 + - name: v1.28 + version: v1.28.7 + - name: v1.29 + version: v1.29.2 + tests: + - ^argo-cel$ + - ^aws-cel$ + - ^best-practices-cel$ + - ^consul-cel$ + - ^flux-cel$ + - ^istio-cel$ + - ^kasten-cel$ + - ^kubecost-cel$ + - ^linkerd-cel$ + - ^nginx-ingress-cel$ + - ^openshift-cel$ + - ^other-cel$/^a + - ^other-cel$/^[b-d] + - ^other-cel$/^[e-l] + - ^other-cel$/^[m-q] + - ^other-cel$/^re[c-q] + - ^other-cel$/^res + - ^other-cel$/^[s-z] + - ^pod-security-cel$ + - ^psa-cel$ + - ^traefik-cel$ + runs-on: ubuntu-latest + name: ${{ matrix.k8s-version.name }} - ${{ matrix.tests }} + steps: + - name: Checkout + uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: Setup Environment + uses: ./.github/actions/setup-env + with: + k8s-version: ${{ matrix.k8s-version.version }} + - name: Run CEL Tests + uses: ./.github/actions/run-tests + with: + tests: ${{ matrix.tests }} diff --git a/.github/workflows/check-actions.yaml b/.github/workflows/check-actions.yaml index e0ce4947e..730ebc85d 100644 --- a/.github/workflows/check-actions.yaml +++ b/.github/workflows/check-actions.yaml @@ -16,9 +16,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Ensure SHA pinned actions - uses: zgosalvez/github-actions-ensure-sha-pinned-actions@76d1d8e0b075d7190b5d59b86da91c7bdbcc99b2 # v3.0.7 + uses: zgosalvez/github-actions-ensure-sha-pinned-actions@b88cd0aad2c36a63e42c71f81cb1958fed95ac87 # v3.0.10 with: allowlist: | kyverno/chainsaw diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8d6c4f603..056dd9607 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: options: --user root steps: - name: Checkout code - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: policies - name: Run ah lint @@ -33,21 +33,21 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: policies - name: Validate all policies run: ./.hack/verify-files-structure.sh working-directory: policies - name: Clone Kyverno - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: repository: kyverno/kyverno path: kyverno # The 
target branch of a pull request or the branch/tag of a push ref: ${{ github.base_ref || github.ref_name }} - name: Set up Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ~1.21.1 - name: Test Policy @@ -58,18 +58,18 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: policies - name: Checkout Kyverno - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: repository: kyverno/kyverno path: kyverno # The target branch of a pull request or the branch/tag of a push ref: ${{ github.base_ref || github.ref_name }} - name: Set up Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ~1.21.1 - name: Lint policies diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c470ea1bc..8c78461b7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -32,7 +32,6 @@ jobs: - ^argo$ - ^aws$ - ^best-practices$ - - ^best-practices-cel$ - ^castai$ - ^cert-manager$ - ^cleanup$ @@ -49,73 +48,28 @@ jobs: - ^nginx-ingress$ - ^openshift$ - ^other$/^a - - ^other-cel$/^a - ^other$/^[b-d] - - ^other-cel$/^[b-d] - ^other$/^[e-l] - - ^other-cel$/^[e-l] - ^other$/^[m-q] - - ^other-cel$/^[m-q] - ^other$/^re[c-q] - - ^other-cel$/^re[c-q] - ^other$/^res - - ^other-cel$/^res - ^other$/^[s-z] - - ^other-cel$/^res - ^pod-security$ - - ^pod-security-cel$ - ^psa$ - ^psp-migration$ - # - ^tekton - # - ^traefik - # - ^velero + - ^tekton$ + - ^traefik$ + - ^velero$ runs-on: ubuntu-latest name: ${{ matrix.k8s-version.name }} - ${{ matrix.tests }} steps: - name: Checkout - 
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - name: Setup Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: Setup Environment + uses: ./.github/actions/setup-env with: - go-version: ~1.21.1 - - name: Install Tools - run: | - set -e - curl -LO "https://dl.k8s.io/release/${{ matrix.k8s-version.version }}/bin/linux/amd64/kubectl" - sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl - - name: Install kind - shell: bash - run: | - set -e - # For AMD64 / x86_64 - [ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 - # For ARM64 - [ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-arm64 - chmod +x ./kind - sudo mv ./kind /usr/local/bin/kind - - name: Install latest Kyverno CLI - uses: kyverno/action-install-cli@fcee92fca5c883169ef9927acf543e0b5fc58289 # v0.2.0 - - name: Create kind cluster - run: | - set -e - kind create cluster --image kindest/node:${{ matrix.k8s-version.version }} --config ./.github/kind.yml - - name: Install latest kyverno - run: | - set -e - kubectl create -f https://github.com/kyverno/kyverno/raw/main/config/install-latest-testing.yaml - - name: Wait for kyverno ready - run: | - set -e - kubectl wait --namespace kyverno --for=condition=ready pod --selector '!job-name' --timeout=60s - - name: Install CRDs - run: | - set -e - kubectl apply -f ./.chainsaw/crds - - name: Install Chainsaw - uses: kyverno/action-install-chainsaw@dd64b5d7b2b7d36fdf701d48ac8b216aa94414db # v0.2.4 - - name: Test with Chainsaw - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - set -e - chainsaw test --config .chainsaw.yaml --include-test-regex '^chainsaw$/${{ matrix.tests }}' --no-color=false + k8s-version: ${{ matrix.k8s-version.version }} + - name: Run Tests + uses: ./.github/actions/run-tests + with: + tests: ${{ 
matrix.tests }} \ No newline at end of file diff --git a/argo-cel/application-field-validation/.chainsaw-test/README.md b/argo-cel/application-field-validation/.chainsaw-test/README.md new file mode 100644 index 000000000..56a15f374 --- /dev/null +++ b/argo-cel/application-field-validation/.chainsaw-test/README.md @@ -0,0 +1,15 @@ +## Description + +This is an automated test of the sample policy in this directory. + +## Expected Behavior + +A policy report should be generated in which the following results are observed: + +* `badapp01` fails for the rule `source-path-chart` and passes for the rule `destination-server-name` +* `badapp02` fails for the rule `destination-server-name` and passes for the rule `source-path-chart` +* `goodapp01` passes for both rules + +## Reference Issue(s) + +N/A diff --git a/argo-cel/application-field-validation/.chainsaw-test/bad-application.yaml b/argo-cel/application-field-validation/.chainsaw-test/bad-application.yaml new file mode 100644 index 000000000..d4f36b1be --- /dev/null +++ b/argo-cel/application-field-validation/.chainsaw-test/bad-application.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: badapp01 + namespace: default +spec: + project: foo + source: + repoURL: https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: guestbook + chart: foo + destination: + server: https://kubernetes.default.svc + namespace: guestbook +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: badapp02 + namespace: default +spec: + project: bar + source: + repoURL: https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: guestbook + destination: + server: https://kubernetes.default.svc + name: foobar + namespace: guestbook \ No newline at end of file diff --git a/argo-cel/application-field-validation/.chainsaw-test/chainsaw-test.yaml b/argo-cel/application-field-validation/.chainsaw-test/chainsaw-test.yaml new file mode 100755 
index 000000000..2a38d20ec --- /dev/null +++ b/argo-cel/application-field-validation/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: application-field-validation +spec: + steps: + - name: step-01 + try: + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ../application-field-validation.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: application-field-validation + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-03 + try: + - apply: + file: good-application.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-application.yaml diff --git a/argo-cel/application-field-validation/.chainsaw-test/crd-assert.yaml b/argo-cel/application-field-validation/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..c7e226c05 --- /dev/null +++ b/argo-cel/application-field-validation/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: applications.argoproj.io +spec: {} +status: + acceptedNames: + kind: Application + listKind: ApplicationList + plural: applications + singular: application + storedVersions: + - v1alpha1 diff --git a/argo-cel/application-field-validation/.chainsaw-test/good-application.yaml b/argo-cel/application-field-validation/.chainsaw-test/good-application.yaml new file mode 100644 index 000000000..9ba4a2543 --- /dev/null +++ b/argo-cel/application-field-validation/.chainsaw-test/good-application.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: goodapp01 + namespace: default +spec: + project: biz + source: + repoURL: 
https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/argo-cel/application-field-validation/.chainsaw-test/policy-ready.yaml b/argo-cel/application-field-validation/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..4f0ddbf67 --- /dev/null +++ b/argo-cel/application-field-validation/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-field-validation +status: + ready: true diff --git a/argo-cel/application-field-validation/.kyverno-test/kyverno-test.yaml b/argo-cel/application-field-validation/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..d7294f2fa --- /dev/null +++ b/argo-cel/application-field-validation/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,35 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: application-field-validation +policies: +- ../application-field-validation.yaml +resources: +- ../.chainsaw-test/bad-application.yaml +- ../.chainsaw-test/good-application.yaml +results: +- policy: application-field-validation + rule: source-path-chart + kind: Application + resources: + - badapp01 + result: fail +- policy: application-field-validation + rule: destination-server-name + kind: Application + resources: + - badapp02 + result: fail +- policy: application-field-validation + rule: source-path-chart + kind: Application + resources: + - goodapp01 + result: pass +- policy: application-field-validation + rule: destination-server-name + kind: Application + resources: + - goodapp01 + result: pass + diff --git a/argo-cel/application-field-validation/application-field-validation.yaml b/argo-cel/application-field-validation/application-field-validation.yaml new file mode 100644 index 000000000..a55686d65 --- /dev/null +++ b/argo-cel/application-field-validation/application-field-validation.yaml @@ -0,0 
+1,61 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-field-validation + annotations: + policies.kyverno.io/title: Application Field Validation in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Application + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + This policy performs some best practices validation on Application fields. + Path or chart must be specified but never both. And destination.name or + destination.server must be specified but never both. +spec: + validationFailureAction: Audit + background: true + rules: + - name: source-path-chart + match: + any: + - resources: + kinds: + - Application + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.spec.source) && + ( + (has(object.spec.source.path) && !has(object.spec.source.chart)) || + (!has(object.spec.source.path) && has(object.spec.source.chart)) + ) + message: >- + `spec.source.path` OR `spec.source.chart` should be specified but never both. + - name: destination-server-name + match: + any: + - resources: + kinds: + - Application + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.spec.destination) && + ( + (has(object.spec.destination.server) && !has(object.spec.destination.name)) || + (!has(object.spec.destination.server) && has(object.spec.destination.name)) + ) + message: >- + `spec.destination.server` OR `spec.destination.name` should be specified but never both. 
+ diff --git a/argo-cel/application-field-validation/artifacthub-pkg.yml b/argo-cel/application-field-validation/artifacthub-pkg.yml new file mode 100644 index 000000000..0bbf6cbc0 --- /dev/null +++ b/argo-cel/application-field-validation/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: application-field-validation-cel +version: 1.0.0 +displayName: Application Field Validation in CEL expressions +description: >- + This policy performs some best practices validation on Application fields. Path or chart must be specified but never both. And destination.name or destination.server must be specified but never both. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/argo-cel/application-field-validation/application-field-validation.yaml + ``` +keywords: + - kyverno + - Argo + - CEL Expressions +readme: | + This policy performs some best practices validation on Application fields. Path or chart must be specified but never both. And destination.name or destination.server must be specified but never both. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Argo in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Application" +digest: e3256994e09731ea081230c85e0d6384a187d53377562b1c82ea9385cec0c4a4 +createdAt: "2024-04-30T15:34:03Z" diff --git a/argo-cel/application-prevent-default-project/.chainsaw-test/bad-application.yaml b/argo-cel/application-prevent-default-project/.chainsaw-test/bad-application.yaml new file mode 100644 index 000000000..f533ed401 --- /dev/null +++ b/argo-cel/application-prevent-default-project/.chainsaw-test/bad-application.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: badapp + namespace: default +spec: + project: default + source: + repoURL: https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/argo-cel/application-prevent-default-project/.chainsaw-test/chainsaw-test.yaml b/argo-cel/application-prevent-default-project/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..f54e9fae4 --- /dev/null +++ b/argo-cel/application-prevent-default-project/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: application-prevent-default-project +spec: + steps: + - name: step-01 + try: + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ../application-prevent-default-project.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: application-prevent-default-project + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml 
+ - name: step-03 + try: + - apply: + file: good-application.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-application.yaml diff --git a/argo-cel/application-prevent-default-project/.chainsaw-test/crd-assert.yaml b/argo-cel/application-prevent-default-project/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..c7e226c05 --- /dev/null +++ b/argo-cel/application-prevent-default-project/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: applications.argoproj.io +spec: {} +status: + acceptedNames: + kind: Application + listKind: ApplicationList + plural: applications + singular: application + storedVersions: + - v1alpha1 diff --git a/argo-cel/application-prevent-default-project/.chainsaw-test/good-application.yaml b/argo-cel/application-prevent-default-project/.chainsaw-test/good-application.yaml new file mode 100644 index 000000000..e0d134e68 --- /dev/null +++ b/argo-cel/application-prevent-default-project/.chainsaw-test/good-application.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: goodapp + namespace: default +spec: + project: biz + source: + repoURL: https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/argo-cel/application-prevent-default-project/.chainsaw-test/policy-ready.yaml b/argo-cel/application-prevent-default-project/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..0edbc929b --- /dev/null +++ b/argo-cel/application-prevent-default-project/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-prevent-default-project +status: + ready: true diff --git a/argo-cel/application-prevent-default-project/.kyverno-test/kyverno-test.yaml 
b/argo-cel/application-prevent-default-project/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..ee4fb0f4d --- /dev/null +++ b/argo-cel/application-prevent-default-project/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: application-prevent-default-project +policies: +- ../application-prevent-default-project.yaml +resources: +- ../.chainsaw-test/bad-application.yaml +- ../.chainsaw-test/good-application.yaml +results: +- policy: application-prevent-default-project + rule: default-project + kind: Application + resources: + - badapp + result: fail +- policy: application-prevent-default-project + rule: default-project + kind: Application + resources: + - goodapp + result: pass + diff --git a/argo-cel/application-prevent-default-project/application-prevent-default-project.yaml b/argo-cel/application-prevent-default-project/application-prevent-default-project.yaml new file mode 100644 index 000000000..49290ca53 --- /dev/null +++ b/argo-cel/application-prevent-default-project/application-prevent-default-project.yaml @@ -0,0 +1,33 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-prevent-default-project + annotations: + policies.kyverno.io/title: Prevent Use of Default Project in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Application + policies.kyverno.io/description: >- + This policy prevents the use of the default project in an Application. 
+spec: + validationFailureAction: Audit + background: true + rules: + - name: default-project + match: + any: + - resources: + kinds: + - Application + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.project) && object.spec.project != 'default'" + message: "The default project may not be used in an Application." + diff --git a/argo-cel/application-prevent-default-project/artifacthub-pkg.yml b/argo-cel/application-prevent-default-project/artifacthub-pkg.yml new file mode 100644 index 000000000..0c10ce14b --- /dev/null +++ b/argo-cel/application-prevent-default-project/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: application-prevent-default-project-cel +version: 1.0.0 +displayName: Prevent Use of Default Project in CEL expressions +description: >- + This policy prevents the use of the default project in an Application. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/argo-cel/application-prevent-default-project/application-prevent-default-project.yaml + ``` +keywords: + - kyverno + - Argo + - CEL Expressions +readme: | + This policy prevents the use of the default project in an Application. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Argo in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Application" +digest: aeb2bc00375b7c44bb36ca7a3cd2f5f80ed17548abf98529d4617803be71196d +createdAt: "2024-04-30T16:03:57Z" + diff --git a/argo-cel/application-prevent-updates-project/.chainsaw-test/application-bad-update.yaml b/argo-cel/application-prevent-updates-project/.chainsaw-test/application-bad-update.yaml new file mode 100644 index 000000000..840d951d0 --- /dev/null +++ b/argo-cel/application-prevent-updates-project/.chainsaw-test/application-bad-update.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: goodapp + namespace: default +spec: + project: newbiz + source: + repoURL: https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/argo-cel/application-prevent-updates-project/.chainsaw-test/application-update.yaml b/argo-cel/application-prevent-updates-project/.chainsaw-test/application-update.yaml new file mode 100644 index 000000000..7c3202cbc --- /dev/null +++ b/argo-cel/application-prevent-updates-project/.chainsaw-test/application-update.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: goodapp + namespace: default +spec: + project: biz + source: + repoURL: https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: book + destination: + server: https://kubernetes.default.svc + namespace: book diff --git a/argo-cel/application-prevent-updates-project/.chainsaw-test/application.yaml b/argo-cel/application-prevent-updates-project/.chainsaw-test/application.yaml new file mode 100644 index 000000000..e0d134e68 --- /dev/null +++ 
b/argo-cel/application-prevent-updates-project/.chainsaw-test/application.yaml @@ -0,0 +1,14 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: goodapp + namespace: default +spec: + project: biz + source: + repoURL: https://github.com/argoproj/argocd-example-apps.git + targetRevision: HEAD + path: guestbook + destination: + server: https://kubernetes.default.svc + namespace: guestbook diff --git a/argo-cel/application-prevent-updates-project/.chainsaw-test/chainsaw-test.yaml b/argo-cel/application-prevent-updates-project/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..1e8dbd46f --- /dev/null +++ b/argo-cel/application-prevent-updates-project/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,37 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: application-prevent-updates-project +spec: + steps: + - name: step-01 + try: + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ../application-prevent-updates-project.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: application-prevent-updates-project + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-03 + try: + - apply: + file: application.yaml + - apply: + file: application-update.yaml + - apply: + expect: + - check: + ($error != null): true + file: application-bad-update.yaml diff --git a/argo-cel/application-prevent-updates-project/.chainsaw-test/crd-assert.yaml b/argo-cel/application-prevent-updates-project/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..c7e226c05 --- /dev/null +++ b/argo-cel/application-prevent-updates-project/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + 
name: applications.argoproj.io +spec: {} +status: + acceptedNames: + kind: Application + listKind: ApplicationList + plural: applications + singular: application + storedVersions: + - v1alpha1 diff --git a/argo-cel/application-prevent-updates-project/.chainsaw-test/policy-ready.yaml b/argo-cel/application-prevent-updates-project/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..820069362 --- /dev/null +++ b/argo-cel/application-prevent-updates-project/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-prevent-updates-project +status: + ready: true diff --git a/argo-cel/application-prevent-updates-project/application-prevent-updates-project.yaml b/argo-cel/application-prevent-updates-project/application-prevent-updates-project.yaml new file mode 100644 index 000000000..c1d6f50bf --- /dev/null +++ b/argo-cel/application-prevent-updates-project/application-prevent-updates-project.yaml @@ -0,0 +1,32 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-prevent-updates-project + annotations: + policies.kyverno.io/title: Prevent Updates to Project in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.12.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Application + policies.kyverno.io/description: >- + This policy prevents updates to the project field after an Application is created. +spec: + validationFailureAction: Audit + background: true + rules: + - name: project-updates + match: + any: + - resources: + kinds: + - Application + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: "object.spec.project == oldObject.spec.project" + message: "The spec.project cannot be changed once the Application is created." 
+ diff --git a/argo-cel/application-prevent-updates-project/artifacthub-pkg.yml b/argo-cel/application-prevent-updates-project/artifacthub-pkg.yml new file mode 100644 index 000000000..f69ab2037 --- /dev/null +++ b/argo-cel/application-prevent-updates-project/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: application-prevent-updates-project-cel +version: 1.0.0 +displayName: Prevent Updates to Project in CEL expressions +description: >- + This policy prevents updates to the project field after an Application is created. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/argo-cel/application-prevent-updates-project/application-prevent-updates-project.yaml + ``` +keywords: + - kyverno + - Argo + - CEL Expressions +readme: | + This policy prevents updates to the project field after an Application is created. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Argo in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Application" +digest: a9aebf68a690cd9b5683fd24dc26dc0949d0ee1c122a153bd417f9e6d4a2b47a +createdAt: "2024-05-01T17:10:46Z" + diff --git a/argo-cel/applicationset-name-matches-project/.chainsaw-test/bad-appset.yaml b/argo-cel/applicationset-name-matches-project/.chainsaw-test/bad-appset.yaml new file mode 100644 index 000000000..5814ffcce --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/.chainsaw-test/bad-appset.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: bad-guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + - cluster: engineering-prod + url: https://2.4.6.8 + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: not-guestbook + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: 
guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook \ No newline at end of file diff --git a/argo-cel/applicationset-name-matches-project/.chainsaw-test/chainsaw-test.yaml b/argo-cel/applicationset-name-matches-project/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..05a65d0ad --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: applicationset-name-matches-project +spec: + steps: + - name: step-01 + try: + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ../applicationset-name-matches-project.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: applicationset-name-matches-project + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-03 + try: + - apply: + file: good-appset.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-appset.yaml diff --git a/argo-cel/applicationset-name-matches-project/.chainsaw-test/crd-assert.yaml b/argo-cel/applicationset-name-matches-project/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..2d1a65c97 --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: applicationsets.argoproj.io +spec: {} +status: + acceptedNames: + kind: ApplicationSet + listKind: ApplicationSetList + plural: applicationsets + singular: applicationset + storedVersions: + - v1alpha1 diff --git a/argo-cel/applicationset-name-matches-project/.chainsaw-test/good-appset.yaml 
b/argo-cel/applicationset-name-matches-project/.chainsaw-test/good-appset.yaml new file mode 100644 index 000000000..cb57f79bb --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/.chainsaw-test/good-appset.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: guestbook +spec: + generators: + - list: + elements: + - cluster: engineering-dev + url: https://1.2.3.4 + - cluster: engineering-prod + url: https://2.4.6.8 + template: + metadata: + name: '{{cluster}}-guestbook' + spec: + project: guestbook + source: + repoURL: https://github.com/infra-team/cluster-deployments.git + targetRevision: HEAD + path: guestbook/{{cluster}} + destination: + server: '{{url}}' + namespace: guestbook \ No newline at end of file diff --git a/argo-cel/applicationset-name-matches-project/.chainsaw-test/policy-ready.yaml b/argo-cel/applicationset-name-matches-project/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..5e891f2a0 --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: applicationset-name-matches-project +status: + ready: true diff --git a/argo-cel/applicationset-name-matches-project/.kyverno-test/kyverno-test.yaml b/argo-cel/applicationset-name-matches-project/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b41f71e22 --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: applicationset-name-matches-project +policies: +- ../applicationset-name-matches-project.yaml +resources: +- ../.chainsaw-test/bad-appset.yaml +- ../.chainsaw-test/good-appset.yaml +results: +- policy: applicationset-name-matches-project + rule: match-name + kind: ApplicationSet + resources: + - bad-guestbook + result: fail +- policy: 
applicationset-name-matches-project + rule: match-name + kind: ApplicationSet + resources: + - guestbook + result: pass + diff --git a/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.yaml b/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.yaml new file mode 100644 index 000000000..af30a936c --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.yaml @@ -0,0 +1,34 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: applicationset-name-matches-project + annotations: + policies.kyverno.io/title: Ensure ApplicationSet Name Matches Project in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: ApplicationSet + policies.kyverno.io/description: >- + This policy ensures that the name of the ApplicationSet is the + same value provided in the project. +spec: + validationFailureAction: Audit + background: true + rules: + - name: match-name + match: + any: + - resources: + kinds: + - ApplicationSet + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.template.spec.project == object.metadata.name" + message: "The name must match the project." + diff --git a/argo-cel/applicationset-name-matches-project/artifacthub-pkg.yml b/argo-cel/applicationset-name-matches-project/artifacthub-pkg.yml new file mode 100644 index 000000000..b248176e3 --- /dev/null +++ b/argo-cel/applicationset-name-matches-project/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: applicationset-name-matches-project-cel +version: 1.0.0 +displayName: Ensure ApplicationSet Name Matches Project in CEL expressions +description: >- + This policy ensures that the name of the ApplicationSet is the same value provided in the project. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.yaml + ``` +keywords: + - kyverno + - Argo + - CEL Expressions +readme: | + This policy ensures that the name of the ApplicationSet is the same value provided in the project. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Argo in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "ApplicationSet" +digest: bcb427d1e2b0623c4c5d0e30bbdec1bdac60a6adb6b2a7e7d2bc74221668ad25 +createdAt: "2024-05-01T16:44:11Z" + diff --git a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-both-wildcard.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-both-wildcard.yaml new file mode 100644 index 000000000..572430859 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-both-wildcard.yaml @@ -0,0 +1,12 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj03 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '' + kind: 'Pod' \ No newline at end of file diff --git a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-group-wildcard.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-group-wildcard.yaml new file mode 100644 index 000000000..27de0770b --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-group-wildcard.yaml @@ -0,0 +1,12 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj01 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '' + kind: '*' \ No newline at end of file diff --git 
a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-kind-wildcard.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-kind-wildcard.yaml new file mode 100644 index 000000000..096ddbb50 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-kind-wildcard.yaml @@ -0,0 +1,12 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj02 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '*' + kind: 'Secret' \ No newline at end of file diff --git a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-no-blacklist.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-no-blacklist.yaml new file mode 100644 index 000000000..9a682b846 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/bad-no-blacklist.yaml @@ -0,0 +1,9 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj04 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc \ No newline at end of file diff --git a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/chainsaw-test.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..454dffd03 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,50 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: appproject-clusterresourceblacklist +spec: + steps: + - name: step-01 + try: + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ../appproject-clusterresourceblacklist.yaml + - patch: + resource: + 
apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: appproject-clusterresourceblacklist + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-03 + try: + - apply: + file: good.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-both-wildcard.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-group-wildcard.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-kind-wildcard.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-no-blacklist.yaml diff --git a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/crd-assert.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..e24f922f8 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: appprojects.argoproj.io +spec: {} +status: + acceptedNames: + kind: AppProject + listKind: AppProjectList + plural: appprojects + singular: appproject + storedVersions: + - v1alpha1 diff --git a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/good.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/good.yaml new file mode 100644 index 000000000..48a4f2181 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/good.yaml @@ -0,0 +1,12 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: goodappproj +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '*' + kind: '*' \ No newline at end of file diff --git a/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/policy-ready.yaml b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..745e7b98b 
--- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: appproject-clusterresourceblacklist +status: + ready: true diff --git a/argo-cel/appproject-clusterresourceblacklist/.kyverno-test/kyverno-test.yaml b/argo-cel/appproject-clusterresourceblacklist/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..37f844910 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,26 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: appproject-clusterresourceblacklist +policies: +- ../appproject-clusterresourceblacklist.yaml +resources: +- resources.yaml +results: +- kind: AppProject + policy: appproject-clusterresourceblacklist + resources: + - badappproj01 + - badappproj02 + - badappproj03 + - badappproj04 + result: fail + rule: has-wildcard-and-validate-clusterresourceblacklist +- kind: AppProject + policy: appproject-clusterresourceblacklist + resources: + - goodappproj01 + - goodappproj02 + result: pass + rule: has-wildcard-and-validate-clusterresourceblacklist + diff --git a/argo-cel/appproject-clusterresourceblacklist/.kyverno-test/resources.yaml b/argo-cel/appproject-clusterresourceblacklist/.kyverno-test/resources.yaml new file mode 100644 index 000000000..1ed3ebc86 --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/.kyverno-test/resources.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: goodappproj01 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '*' + kind: '*' +--- +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: goodappproj02 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: 
+ - group: '*' + kind: '*' +--- +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj01 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '' + kind: '*' +--- +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj02 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '*' + kind: 'Secret' +--- +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj03 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + clusterResourceBlacklist: + - group: '' + kind: 'Pod' +--- +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: badappproj04 +spec: + description: Test Project + destinations: + - namespace: default + server: https://kubernetes.default.svc + # clusterResourceBlacklist: + # - group: '' + # kind: 'Pod' diff --git a/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.yaml b/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.yaml new file mode 100644 index 000000000..18827a80e --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.yaml @@ -0,0 +1,39 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: appproject-clusterresourceblacklist + annotations: + policies.kyverno.io/title: Enforce AppProject with clusterResourceBlacklist in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: AppProject + policies.kyverno.io/description: >- + An AppProject may optionally specify clusterResourceBlacklist 
which is a blacklisted + group of cluster resources. This is often a good practice to ensure AppProjects do + not allow more access than needed. This policy is a combination of two rules which + enforce that all AppProjects specify clusterResourceBlacklist and that their group + and kind have wildcards as values. +spec: + validationFailureAction: Audit + background: true + rules: + - name: has-wildcard-and-validate-clusterresourceblacklist + match: + any: + - resources: + kinds: + - AppProject + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.clusterResourceBlacklist)" + message: "AppProject must specify clusterResourceBlacklist." + - expression: "object.spec.clusterResourceBlacklist.all(element, element.group.contains('*') && element.kind.contains('*'))" + message: "Wildcards must be present in group and kind for clusterResourceBlacklist." + diff --git a/argo-cel/appproject-clusterresourceblacklist/artifacthub-pkg.yml b/argo-cel/appproject-clusterresourceblacklist/artifacthub-pkg.yml new file mode 100644 index 000000000..b91d65bae --- /dev/null +++ b/argo-cel/appproject-clusterresourceblacklist/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: appproject-clusterresourceblacklist-cel +version: 1.0.0 +displayName: Enforce AppProject with clusterResourceBlacklist in CEL expressions +description: >- + An AppProject may optionally specify clusterResourceBlacklist which is a blacklisted group of cluster resources. This is often a good practice to ensure AppProjects do not allow more access than needed. This policy is a combination of two rules which enforce that all AppProjects specify clusterResourceBlacklist and that their group and kind have wildcards as values. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.yaml + ``` +keywords: + - kyverno + - Argo + - CEL Expressions +readme: | + An AppProject may optionally specify clusterResourceBlacklist which is a blacklisted group of cluster resources. This is often a good practice to ensure AppProjects do not allow more access than needed. This policy is a combination of two rules which enforce that all AppProjects specify clusterResourceBlacklist and that their group and kind have wildcards as values. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Argo in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "AppProject" +digest: 4c6e24e7f54e6473c6e56cd704c9de401b8c5703570e72d32d2c27bb38641b11 +createdAt: "2024-05-01T16:57:53Z" + diff --git a/argo/application-field-validation/.kyverno-test/kyverno-test.yaml b/argo/application-field-validation/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..d7294f2fa --- /dev/null +++ b/argo/application-field-validation/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,35 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: application-field-validation +policies: +- ../application-field-validation.yaml +resources: +- ../.chainsaw-test/bad-application.yaml +- ../.chainsaw-test/good-application.yaml +results: +- policy: application-field-validation + rule: source-path-chart + kind: Application + resources: + - badapp01 + result: fail +- policy: application-field-validation + rule: destination-server-name + kind: Application + resources: + - badapp02 + result: fail +- policy: application-field-validation + rule: source-path-chart + kind: Application + resources: + - goodapp01 + result: pass +- policy: application-field-validation + rule: destination-server-name + 
kind: Application + resources: + - goodapp01 + result: pass + diff --git a/argo/application-prevent-default-project/.kyverno-test/kyverno-test.yaml b/argo/application-prevent-default-project/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..ee4fb0f4d --- /dev/null +++ b/argo/application-prevent-default-project/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: application-prevent-default-project +policies: +- ../application-prevent-default-project.yaml +resources: +- ../.chainsaw-test/bad-application.yaml +- ../.chainsaw-test/good-application.yaml +results: +- policy: application-prevent-default-project + rule: default-project + kind: Application + resources: + - badapp + result: fail +- policy: application-prevent-default-project + rule: default-project + kind: Application + resources: + - goodapp + result: pass + diff --git a/argo/applicationset-name-matches-project/.chainsaw-test/bad-appset.yaml b/argo/applicationset-name-matches-project/.chainsaw-test/bad-appset.yaml index 269f150e1..5814ffcce 100644 --- a/argo/applicationset-name-matches-project/.chainsaw-test/bad-appset.yaml +++ b/argo/applicationset-name-matches-project/.chainsaw-test/bad-appset.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: ApplicationSet metadata: - name: guestbook + name: bad-guestbook spec: generators: - list: diff --git a/argo/applicationset-name-matches-project/.kyverno-test/kyverno-test.yaml b/argo/applicationset-name-matches-project/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b41f71e22 --- /dev/null +++ b/argo/applicationset-name-matches-project/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: applicationset-name-matches-project +policies: +- ../applicationset-name-matches-project.yaml +resources: +- ../.chainsaw-test/bad-appset.yaml +- ../.chainsaw-test/good-appset.yaml +results: +- policy: 
applicationset-name-matches-project + rule: match-name + kind: ApplicationSet + resources: + - bad-guestbook + result: fail +- policy: applicationset-name-matches-project + rule: match-name + kind: ApplicationSet + resources: + - guestbook + result: pass + diff --git a/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/chainsaw-test.yaml b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..6b1de9110 --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-encryption-aws-loadbalancers +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-encryption-aws-loadbalancers.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-encryption-aws-loadbalancers + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + expect: + - check: + ($error != null): true + file: service-fail.yaml + - apply: + file: service-pass.yaml + - apply: + file: service-skip.yaml diff --git a/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/policy-ready.yaml b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..7806e1591 --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-encryption-aws-loadbalancers +status: + ready: true diff --git a/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-fail.yaml b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-fail.yaml new file 
mode 100644 index 000000000..59f6034af --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-fail.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-service +spec: + selector: + app: nginx + ports: + - port: 80 + targetPort: 8080 + type: LoadBalancer \ No newline at end of file diff --git a/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-pass.yaml b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-pass.yaml new file mode 100644 index 000000000..25aa97d5e --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-pass.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: example-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "SSL-CERT-HERE" +spec: + selector: + app: example + ports: + - port: 80 + targetPort: 8080 + type: LoadBalancer \ No newline at end of file diff --git a/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-skip.yaml b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-skip.yaml new file mode 100644 index 000000000..56019f7cb --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/.chainsaw-test/service-skip.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: nginx + name: nginx +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + run: nginx + type: ClusterIP \ No newline at end of file diff --git a/aws-cel/require-encryption-aws-loadbalancers/.kyverno-test/kyverno-test.yaml b/aws-cel/require-encryption-aws-loadbalancers/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c0813f244 --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,30 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-encryption-aws-loadbalancers +policies: +- ../require-encryption-aws-loadbalancers.yaml 
+resources: +- ../.chainsaw-test/service-fail.yaml +- ../.chainsaw-test/service-pass.yaml +- ../.chainsaw-test/service-skip.yaml +results: +- policy: require-encryption-aws-loadbalancers + rule: aws-loadbalancer-has-ssl-cert + kind: Service + resources: + - nginx-service + result: fail +- policy: require-encryption-aws-loadbalancers + rule: aws-loadbalancer-has-ssl-cert + kind: Service + resources: + - example-service + result: pass +- policy: require-encryption-aws-loadbalancers + rule: aws-loadbalancer-has-ssl-cert + kind: Service + resources: + - nginx + result: skip + diff --git a/aws-cel/require-encryption-aws-loadbalancers/artifacthub-pkg.yml b/aws-cel/require-encryption-aws-loadbalancers/artifacthub-pkg.yml new file mode 100644 index 000000000..5b4a27745 --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/artifacthub-pkg.yml @@ -0,0 +1,25 @@ +name: require-encryption-aws-loadbalancers-cel +version: 1.0.0 +displayName: Require Encryption with AWS LoadBalancers in CEL expressions +description: >- + Services of type LoadBalancer when deployed inside AWS have support for transport encryption if it is enabled via an annotation. This policy requires that Services of type LoadBalancer contain the annotation service.beta.kubernetes.io/aws-load-balancer-ssl-cert with some value. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.yaml + ``` +keywords: + - kyverno + - AWS + - EKS Best Practices + - CEL Expressions +readme: | + Services of type LoadBalancer when deployed inside AWS have support for transport encryption if it is enabled via an annotation. This policy requires that Services of type LoadBalancer contain the annotation service.beta.kubernetes.io/aws-load-balancer-ssl-cert with some value. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "AWS, EKS Best Practices in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Service" +digest: 0a2c4fcb1a4aa5900aef0abba83625024def643c47ccfe1c6e0d1314c484f6f5 +createdAt: "2024-05-11T16:01:13Z" + diff --git a/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.yaml b/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.yaml new file mode 100644 index 000000000..7c94d956c --- /dev/null +++ b/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.yaml @@ -0,0 +1,40 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-encryption-aws-loadbalancers + annotations: + policies.kyverno.io/title: Require Encryption with AWS LoadBalancers in CEL expressions + policies.kyverno.io/category: AWS, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Service + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Services of type LoadBalancer when deployed inside AWS have support for + transport encryption if it is enabled via an annotation. This policy requires + that Services of type LoadBalancer contain the annotation + service.beta.kubernetes.io/aws-load-balancer-ssl-cert with some value. 
+spec: + validationFailureAction: Audit + background: true + rules: + - name: aws-loadbalancer-has-ssl-cert + match: + any: + - resources: + kinds: + - Service + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "type-should-be-load-balancer" + expression: "object.spec.type == 'LoadBalancer'" + validate: + cel: + expressions: + - expression: >- + has(object.metadata.annotations) && + 'service.beta.kubernetes.io/aws-load-balancer-ssl-cert' in object.metadata.annotations && object.metadata.annotations['service.beta.kubernetes.io/aws-load-balancer-ssl-cert'] != '' + message: "Service of type LoadBalancer must carry the annotation service.beta.kubernetes.io/aws-load-balancer-ssl-cert." + diff --git a/aws/require-encryption-aws-loadbalancers/.kyverno-test/kyverno-test.yaml b/aws/require-encryption-aws-loadbalancers/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c0813f244 --- /dev/null +++ b/aws/require-encryption-aws-loadbalancers/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,30 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-encryption-aws-loadbalancers +policies: +- ../require-encryption-aws-loadbalancers.yaml +resources: +- ../.chainsaw-test/service-fail.yaml +- ../.chainsaw-test/service-pass.yaml +- ../.chainsaw-test/service-skip.yaml +results: +- policy: require-encryption-aws-loadbalancers + rule: aws-loadbalancer-has-ssl-cert + kind: Service + resources: + - nginx-service + result: fail +- policy: require-encryption-aws-loadbalancers + rule: aws-loadbalancer-has-ssl-cert + kind: Service + resources: + - example-service + result: pass +- policy: require-encryption-aws-loadbalancers + rule: aws-loadbalancer-has-ssl-cert + kind: Service + resources: + - nginx + result: skip + diff --git a/best-practices-cel/disallow-cri-sock-mount/artifacthub-pkg.yml b/best-practices-cel/disallow-cri-sock-mount/artifacthub-pkg.yml index 0c1b038ab..b27921347 100644 --- 
a/best-practices-cel/disallow-cri-sock-mount/artifacthub-pkg.yml +++ b/best-practices-cel/disallow-cri-sock-mount/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Best Practices, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 0b91de77f8a6da0cafea457e0ba9eb14f0b8eb6bbcb56419a4e9de09c860753d +digest: 535db8906befe485750d0cc9094aca1a064e2738d9f1d60bd1dd72da9d7b6ca2 createdAt: "2024-03-14T15:59:52Z" diff --git a/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.yaml b/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.yaml index 3351e4eb5..b243dd332 100644 --- a/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.yaml +++ b/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/best-practices-cel/disallow-default-namespace/artifacthub-pkg.yml b/best-practices-cel/disallow-default-namespace/artifacthub-pkg.yml index a1e3896fd..60eefd39c 100644 --- a/best-practices-cel/disallow-default-namespace/artifacthub-pkg.yml +++ b/best-practices-cel/disallow-default-namespace/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Multi-Tenancy in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: ab82cba43ef60cf84553fd3119a13db17cc1bc97d8d9a71526b7d3a636c6ce2b +digest: f2f0202e5f53ea5c446960c2c2467824b3ebb737150b4e9e4a83e700f89c3195 createdAt: "2024-03-08T06:15:05Z" diff --git a/best-practices-cel/disallow-default-namespace/disallow-default-namespace.yaml b/best-practices-cel/disallow-default-namespace/disallow-default-namespace.yaml index 70a09fefd..ea58613dc 100644 --- a/best-practices-cel/disallow-default-namespace/disallow-default-namespace.yaml +++ b/best-practices-cel/disallow-default-namespace/disallow-default-namespace.yaml @@ -28,6 +28,9 @@ spec: - resources: kinds: - Pod + 
operations: + - CREATE + - UPDATE validate: cel: expressions: @@ -42,6 +45,9 @@ spec: - Deployment - Job - StatefulSet + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/disallow-empty-ingress-host/artifacthub-pkg.yml b/best-practices-cel/disallow-empty-ingress-host/artifacthub-pkg.yml index 43a417ede..0db591b73 100644 --- a/best-practices-cel/disallow-empty-ingress-host/artifacthub-pkg.yml +++ b/best-practices-cel/disallow-empty-ingress-host/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Ingress" -digest: 9cb3d5814cea4a34de185ce8fd469762a83ef93b910e70b2e6a9ec953e65448c +digest: 0ffe2735a10b721569cf7139d0d7d51dbc9327beae68e50e4f54f560804548e9 createdAt: "2024-03-09T14:19:51Z" diff --git a/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.yaml b/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.yaml index 6a243a03d..c8cf73536 100644 --- a/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.yaml +++ b/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/disallow-helm-tiller/artifacthub-pkg.yml b/best-practices-cel/disallow-helm-tiller/artifacthub-pkg.yml index 6e9077843..ad20504c1 100644 --- a/best-practices-cel/disallow-helm-tiller/artifacthub-pkg.yml +++ b/best-practices-cel/disallow-helm-tiller/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 68bd8e1cf068759dc436032f3bcb1204992b84ba33498ffd76b744329976769e +digest: 3ec71460444eda338adc7c96f76d9369275f9b494f9fca8248e240d4424937dc createdAt: "2024-03-08T06:30:37Z" diff --git 
a/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.yaml b/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.yaml index f439c5bed..a9f5c8338 100644 --- a/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.yaml +++ b/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/disallow-latest-tag/artifacthub-pkg.yml b/best-practices-cel/disallow-latest-tag/artifacthub-pkg.yml index 6507d2f0c..6275bb017 100644 --- a/best-practices-cel/disallow-latest-tag/artifacthub-pkg.yml +++ b/best-practices-cel/disallow-latest-tag/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: a477c5213f24a7096495b438a30733d4bbea2da32b2e1f8f71696d72d68d7704 +digest: 46eddb82b6df69bf68894505115899ff2ed833cbe22a05b3c933abf422017110 createdAt: "2024-03-07T20:17:11Z" diff --git a/best-practices-cel/disallow-latest-tag/disallow-latest-tag.yaml b/best-practices-cel/disallow-latest-tag/disallow-latest-tag.yaml index 6c3b1e74d..4c467efb7 100644 --- a/best-practices-cel/disallow-latest-tag/disallow-latest-tag.yaml +++ b/best-practices-cel/disallow-latest-tag/disallow-latest-tag.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/require-drop-all/artifacthub-pkg.yml b/best-practices-cel/require-drop-all/artifacthub-pkg.yml index 737bd1c04..8d7c64e35 100644 --- a/best-practices-cel/require-drop-all/artifacthub-pkg.yml +++ b/best-practices-cel/require-drop-all/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: ccf9ef87f6686a93ce044ade6cbe9e68be85679f9a5b315a61eeb0aeafe9ef15 +digest: 
c3d8959bdc68460e21ff5495994d0bb1a3aa7cb7a5b31740af33638b2dad466c createdAt: "2024-03-10T05:05:42Z" diff --git a/best-practices-cel/require-drop-all/require-drop-all.yaml b/best-practices-cel/require-drop-all/require-drop-all.yaml index 5dd9c317c..70b9eca5f 100644 --- a/best-practices-cel/require-drop-all/require-drop-all.yaml +++ b/best-practices-cel/require-drop-all/require-drop-all.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/best-practices-cel/require-drop-cap-net-raw/artifacthub-pkg.yml b/best-practices-cel/require-drop-cap-net-raw/artifacthub-pkg.yml index 53e42ed4b..4e5d6742d 100644 --- a/best-practices-cel/require-drop-cap-net-raw/artifacthub-pkg.yml +++ b/best-practices-cel/require-drop-cap-net-raw/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: a85254bf47f0ec7a3f95139285257228f8f4571f7328ce3652a850d7e901b8c0 +digest: ef4e56b25b29423934e0e21cdea2d6c4e0ae3e67d84a1456f52b3d66fe9fa25a createdAt: "2024-03-15T03:05:47Z" diff --git a/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.yaml b/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.yaml index 4b8e23222..1064b6335 100644 --- a/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.yaml +++ b/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/best-practices-cel/require-labels/artifacthub-pkg.yml b/best-practices-cel/require-labels/artifacthub-pkg.yml index 4ec582c4a..321438b73 100644 --- a/best-practices-cel/require-labels/artifacthub-pkg.yml +++ b/best-practices-cel/require-labels/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" 
kyverno/subject: "Pod, Label" -digest: 8d6468a51c9a06d26d61874840ff5d0dbda8fe1b4afc9674dcb823a6df0965f7 +digest: cdcd97f2977e45e753975a75184c12d37e297a615f50322be925e64885ffa5e0 createdAt: "2024-03-06T19:31:45Z" diff --git a/best-practices-cel/require-labels/require-labels.yaml b/best-practices-cel/require-labels/require-labels.yaml index f4321ca2a..c43d15657 100644 --- a/best-practices-cel/require-labels/require-labels.yaml +++ b/best-practices-cel/require-labels/require-labels.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/require-pod-requests-limits/artifacthub-pkg.yml b/best-practices-cel/require-pod-requests-limits/artifacthub-pkg.yml index 24e1e8a86..894ba6f8e 100644 --- a/best-practices-cel/require-pod-requests-limits/artifacthub-pkg.yml +++ b/best-practices-cel/require-pod-requests-limits/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Best Practices, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 9ffe67ea0a581f0373ab2dda4a48f4a3e589301c4a51a1e058494016d1f4228b +digest: 68eb214fbee5f70f276845c2083cfadc942ed0d45c8237462a152771cdc7c299 createdAt: "2024-03-15T03:34:10Z" diff --git a/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.yaml b/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.yaml index 1c241e150..35e0fca07 100644 --- a/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.yaml +++ b/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/require-probes/artifacthub-pkg.yml b/best-practices-cel/require-probes/artifacthub-pkg.yml index a0281a1bb..fba1c4b28 100644 --- a/best-practices-cel/require-probes/artifacthub-pkg.yml +++ 
b/best-practices-cel/require-probes/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Best Practices, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: bef0d9498ddf0d62aa5fe8671726b4f79cb6c53c7657e48b6fa5a21ded00fefe +digest: 4c8b625397475449d6c047c78b460ea943ca9753526790bbc725e75163534dd9 createdAt: "2024-03-10T14:28:37Z" diff --git a/best-practices-cel/require-probes/require-probes.yaml b/best-practices-cel/require-probes/require-probes.yaml index b39c9d16d..cf14da6f9 100644 --- a/best-practices-cel/require-probes/require-probes.yaml +++ b/best-practices-cel/require-probes/require-probes.yaml @@ -28,6 +28,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/require-ro-rootfs/artifacthub-pkg.yml b/best-practices-cel/require-ro-rootfs/artifacthub-pkg.yml index f99c882ce..87f6545b4 100644 --- a/best-practices-cel/require-ro-rootfs/artifacthub-pkg.yml +++ b/best-practices-cel/require-ro-rootfs/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Best Practices, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 3e33d8d031a0cf31168bd04a0b691520ff01b6dc908554bbc1591700c0e69b0e +digest: 08e28ef463ea200092f19e279fa3da071b276315f555b579786c564bbb8718c5 createdAt: "2024-03-07T12:35:00Z" diff --git a/best-practices-cel/require-ro-rootfs/require-ro-rootfs.yaml b/best-practices-cel/require-ro-rootfs/require-ro-rootfs.yaml index bf56915c4..84a042438 100644 --- a/best-practices-cel/require-ro-rootfs/require-ro-rootfs.yaml +++ b/best-practices-cel/require-ro-rootfs/require-ro-rootfs.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/best-practices-cel/restrict-image-registries/artifacthub-pkg.yml b/best-practices-cel/restrict-image-registries/artifacthub-pkg.yml index 330273cff..fc35e4940 
100644 --- a/best-practices-cel/restrict-image-registries/artifacthub-pkg.yml +++ b/best-practices-cel/restrict-image-registries/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Best Practices, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 9a431345cee629f64c388e843e39a83ce327d049ac03c02ebad4105abe0c9af5 +digest: cac6e95f5ac6f7d7235349ac935745672c2112a0a5400e8fb1f59c9750850ad0 createdAt: "2024-03-07T13:35:11Z" diff --git a/best-practices-cel/restrict-image-registries/restrict-image-registries.yaml b/best-practices-cel/restrict-image-registries/restrict-image-registries.yaml index 5c8bfd43b..6d55959fd 100644 --- a/best-practices-cel/restrict-image-registries/restrict-image-registries.yaml +++ b/best-practices-cel/restrict-image-registries/restrict-image-registries.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/best-practices-cel/restrict-node-port/artifacthub-pkg.yml b/best-practices-cel/restrict-node-port/artifacthub-pkg.yml index dbe33e351..8118d2e63 100644 --- a/best-practices-cel/restrict-node-port/artifacthub-pkg.yml +++ b/best-practices-cel/restrict-node-port/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Service" -digest: 5cee0523689e20cc0e5adccc2cf76d725c7f8df2213e7dfdede1fb768fa9a2b9 +digest: 94702e242c40699edeccac5f44c1d481c5b0426396eb3de4ed2ca771aed7868e createdAt: "2024-03-06T14:04:34Z" diff --git a/best-practices-cel/restrict-node-port/restrict-node-port.yaml b/best-practices-cel/restrict-node-port/restrict-node-port.yaml index 4a28d2a1b..9ea76c4b4 100644 --- a/best-practices-cel/restrict-node-port/restrict-node-port.yaml +++ b/best-practices-cel/restrict-node-port/restrict-node-port.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Service + operations: + - CREATE + - UPDATE validate: cel: 
expressions: diff --git a/best-practices-cel/restrict-service-external-ips/artifacthub-pkg.yml b/best-practices-cel/restrict-service-external-ips/artifacthub-pkg.yml index 2aad9d303..c89fc356a 100644 --- a/best-practices-cel/restrict-service-external-ips/artifacthub-pkg.yml +++ b/best-practices-cel/restrict-service-external-ips/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Service" -digest: d03ee8911a09239eb433b737858c9e7eb99da256ab9114a2268537e235817b3c +digest: dae3c0bf20b0a1a0f3ad7e395d3c05742a4e6ec87813bb16d63eae2ebaa9a744 createdAt: "2024-03-07T05:48:27Z" diff --git a/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.yaml b/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.yaml index 636ddc996..4d75de9da 100644 --- a/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.yaml +++ b/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Service + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/cleanup/cleanup-bare-pods/.chainsaw-test/chainsaw-step-02-assert-1.yaml b/cleanup/cleanup-bare-pods/.chainsaw-test/chainsaw-step-02-assert-1.yaml new file mode 100644 index 000000000..f0fe23d34 --- /dev/null +++ b/cleanup/cleanup-bare-pods/.chainsaw-test/chainsaw-step-02-assert-1.yaml @@ -0,0 +1,4 @@ +apiVersion: kyverno.io/v2beta1 +kind: ClusterCleanupPolicy +metadata: + name: clean-bare-pods diff --git a/cleanup/cleanup-bare-pods/.chainsaw-test/chainsaw-test.yaml b/cleanup/cleanup-bare-pods/.chainsaw-test/chainsaw-test.yaml new file mode 100644 index 000000000..d9cf0944a --- /dev/null +++ b/cleanup/cleanup-bare-pods/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: cleanup-bare-pods +spec: + steps: + - name: apply 
cluster role + try: + - apply: + file: cluster-role.yaml + - name: create a bare pod + try: + - apply: + file: pod.yaml + - assert: + file: pod.yaml + - name: apply cleanup policy + try: + - apply: + file: ../cleanup-bare-pods.yaml + - patch: + resource: + apiVersion: kyverno.io/v2beta1 + kind: ClusterCleanupPolicy + metadata: + name: clean-bare-pods + spec: + schedule: "*/1 * * * *" + - assert: + file: chainsaw-step-02-assert-1.yaml + - name: wait for scheduled deletion + try: + - sleep: + duration: 1m30s + - name: check for bare pod + try: + - error: + file: pod.yaml \ No newline at end of file diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-01-apply-1.yaml b/cleanup/cleanup-bare-pods/.chainsaw-test/cluster-role.yaml old mode 100755 new mode 100644 similarity index 54% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-01-apply-1.yaml rename to cleanup/cleanup-bare-pods/.chainsaw-test/cluster-role.yaml index 1dc53ed2c..6e5bdaf66 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-01-apply-1.yaml +++ b/cleanup/cleanup-bare-pods/.chainsaw-test/cluster-role.yaml @@ -1,17 +1,20 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + namespace: kyverno labels: - app.kubernetes.io/component: background-controller + app.kubernetes.io/component: cleanup-controller app.kubernetes.io/instance: kyverno app.kubernetes.io/part-of: kyverno - name: kyverno:background-controller:k10-goldbackuppolicy + name: kyverno:cleanup-controller:barepods rules: - apiGroups: - - config.kio.kasten.io + - "" resources: - - policies + - pods verbs: - - create - - update + - get + - watch + - list - delete + diff --git a/cleanup/cleanup-bare-pods/.chainsaw-test/pod.yaml b/cleanup/cleanup-bare-pods/.chainsaw-test/pod.yaml new file mode 100644 index 000000000..966df958a --- /dev/null +++ b/cleanup/cleanup-bare-pods/.chainsaw-test/pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + 
name: bare-pod +spec: + containers: + - name: nginx + image: nginx:1.14.1 diff --git a/consul-cel/enforce-min-tls-version/.chainsaw-test/chainsaw-test.yaml b/consul-cel/enforce-min-tls-version/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..2385bfce2 --- /dev/null +++ b/consul-cel/enforce-min-tls-version/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,27 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: enforce-min-tls-version +spec: + steps: + - name: step-00 + try: + - assert: + file: crd-assert.yaml + - name: step-01 + try: + - apply: + file: ../enforce-min-tls-version.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: mesh-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: mesh-bad.yaml diff --git a/consul-cel/enforce-min-tls-version/.chainsaw-test/crd-assert.yaml b/consul-cel/enforce-min-tls-version/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..49fddfad6 --- /dev/null +++ b/consul-cel/enforce-min-tls-version/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: meshes.consul.hashicorp.com +spec: {} +status: + acceptedNames: + kind: Mesh + listKind: MeshList + plural: meshes + singular: mesh + storedVersions: + - v1alpha1 diff --git a/consul-cel/enforce-min-tls-version/.chainsaw-test/mesh-bad.yaml b/consul-cel/enforce-min-tls-version/.chainsaw-test/mesh-bad.yaml new file mode 100644 index 000000000..510ff09c6 --- /dev/null +++ b/consul-cel/enforce-min-tls-version/.chainsaw-test/mesh-bad.yaml @@ -0,0 +1,8 @@ +apiVersion: consul.hashicorp.com/v1alpha1 +kind: Mesh +metadata: + name: badmesh01 +spec: + tls: + incoming: + tlsMinVersion: TLSv1_1 \ No newline at end of file diff --git 
a/consul-cel/enforce-min-tls-version/.chainsaw-test/mesh-good.yaml b/consul-cel/enforce-min-tls-version/.chainsaw-test/mesh-good.yaml new file mode 100644 index 000000000..a21ce7ecb --- /dev/null +++ b/consul-cel/enforce-min-tls-version/.chainsaw-test/mesh-good.yaml @@ -0,0 +1,8 @@ +apiVersion: consul.hashicorp.com/v1alpha1 +kind: Mesh +metadata: + name: goodmesh01 +spec: + tls: + incoming: + tlsMinVersion: TLSv1_2 \ No newline at end of file diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/policy-ready.yaml b/consul-cel/enforce-min-tls-version/.chainsaw-test/policy-ready.yaml similarity index 65% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/policy-ready.yaml rename to consul-cel/enforce-min-tls-version/.chainsaw-test/policy-ready.yaml index 47d9d5ff2..be7a47e8e 100644 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/policy-ready.yaml +++ b/consul-cel/enforce-min-tls-version/.chainsaw-test/policy-ready.yaml @@ -1,6 +1,6 @@ apiVersion: kyverno.io/v1 kind: ClusterPolicy metadata: - name: k10-generate-gold-backup-policy + name: enforce-min-tls-version status: ready: true \ No newline at end of file diff --git a/consul-cel/enforce-min-tls-version/.kyverno-test/kyverno-test.yaml b/consul-cel/enforce-min-tls-version/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..999cb075d --- /dev/null +++ b/consul-cel/enforce-min-tls-version/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-min-tls-version +policies: +- ../enforce-min-tls-version.yaml +resources: +- resource.yaml +results: +- kind: Mesh + policy: enforce-min-tls-version + resources: + - badmesh + result: fail + rule: check-for-tls-version +- kind: Mesh + policy: enforce-min-tls-version + resources: + - goodmesh + result: pass + rule: check-for-tls-version diff --git a/consul-cel/enforce-min-tls-version/.kyverno-test/resource.yaml 
b/consul-cel/enforce-min-tls-version/.kyverno-test/resource.yaml new file mode 100644 index 000000000..a6bf83b5f --- /dev/null +++ b/consul-cel/enforce-min-tls-version/.kyverno-test/resource.yaml @@ -0,0 +1,17 @@ +apiVersion: consul.hashicorp.com/v1alpha1 +kind: Mesh +metadata: + name: badmesh +spec: + tls: + incoming: + tlsMinVersion: TLSv1_1 +--- +apiVersion: consul.hashicorp.com/v1alpha1 +kind: Mesh +metadata: + name: goodmesh +spec: + tls: + incoming: + tlsMinVersion: TLSv1_2 \ No newline at end of file diff --git a/consul-cel/enforce-min-tls-version/artifacthub-pkg.yml b/consul-cel/enforce-min-tls-version/artifacthub-pkg.yml new file mode 100644 index 000000000..29cee5d57 --- /dev/null +++ b/consul-cel/enforce-min-tls-version/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: enforce-min-tls-version-cel +version: 1.0.0 +displayName: Enforce Consul min TLS version in CEL expressions +description: >- + This policy will check the TLS Min version to ensure that whenever the mesh is set, there is a minimum version of TLS set for all the service mesh proxies and this enforces that service mesh mTLS traffic uses TLS v1.2 or newer. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/consul-cel/enforce-min-tls-version/enforce-min-tls-version.yaml + ``` +keywords: + - kyverno + - Consul + - CEL Expressions +readme: | + This policy will check the TLS Min version to ensure that whenever the mesh is set, there is a minimum version of TLS set for all the service mesh proxies and this enforces that service mesh mTLS traffic uses TLS v1.2 or newer. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Consul in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Mesh" +digest: 076a14dd5d7a4b4b69d9b7c1c53deab6e8b2c0ce0ed570f3cf07b661fca92aef +createdAt: "2024-05-02T17:47:54Z" + diff --git a/consul-cel/enforce-min-tls-version/enforce-min-tls-version.yaml b/consul-cel/enforce-min-tls-version/enforce-min-tls-version.yaml new file mode 100644 index 000000000..e7d340b3d --- /dev/null +++ b/consul-cel/enforce-min-tls-version/enforce-min-tls-version.yaml @@ -0,0 +1,35 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-min-tls-version + annotations: + policies.kyverno.io/title: Enforce Consul min TLS version in CEL expressions + policies.kyverno.io/category: Consul in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Mesh + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + This policy will check the TLS Min version to ensure that whenever the mesh is set, there is a minimum version of TLS set for all the service mesh proxies and this enforces that service mesh mTLS traffic uses TLS v1.2 or newer. 
+spec: + validationFailureAction: Enforce + background: true + rules: + - name: check-for-tls-version + match: + any: + - resources: + kinds: + - Mesh + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.spec) && has(object.spec.tls) && has(object.spec.tls.incoming) && + has(object.spec.tls.incoming.tlsMinVersion) && object.spec.tls.incoming.tlsMinVersion == 'TLSv1_2' + message: The minimum version of TLS is TLS v1_2 + diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/bucket-crd-assert.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/bucket-crd-assert.yaml new file mode 100755 index 000000000..688485ded --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/bucket-crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: buckets.source.toolkit.fluxcd.io +spec: {} +status: + acceptedNames: + kind: Bucket + listKind: BucketList + plural: buckets + singular: bucket + storedVersions: + - v1beta2 diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/chainsaw-test.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..1d0b7c63e --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,64 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: verify-flux-sources +spec: + steps: + - name: step-01 + try: + - apply: + file: ../verify-flux-sources.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: verify-flux-sources + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: bucket-crd-assert.yaml + - assert: + file: git-repository-crd-assert.yaml + - assert: + file: 
image-repository-crd-assert.yaml + - assert: + file: helm-repository-crd-assert.yaml + - name: step-02 + try: + - apply: + file: ns.yaml + - name: step-03 + try: + - apply: + expect: + - check: + ($error != null): true + file: repo-bad-git.yaml + - apply: + expect: + - check: + ($error != null): true + file: repo-bad-bucket.yaml + - apply: + expect: + - check: + ($error != null): true + file: repo-bad-helm.yaml + - apply: + expect: + - check: + ($error != null): true + file: repo-bad-image.yaml + - apply: + file: repo-good-git.yaml + - apply: + file: repo-good-bucket.yaml + - apply: + file: repo-good-helm.yaml + - apply: + file: repo-good-image.yaml diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/git-repository-crd-assert.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/git-repository-crd-assert.yaml new file mode 100755 index 000000000..79db50af3 --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/git-repository-crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: gitrepositories.source.toolkit.fluxcd.io +spec: {} +status: + acceptedNames: + kind: GitRepository + listKind: GitRepositoryList + plural: gitrepositories + singular: gitrepository + storedVersions: + - v1 diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/helm-repository-crd-assert.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/helm-repository-crd-assert.yaml new file mode 100755 index 000000000..22d1c289c --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/helm-repository-crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: helmrepositories.source.toolkit.fluxcd.io +spec: {} +status: + acceptedNames: + kind: HelmRepository + listKind: HelmRepositoryList + plural: helmrepositories + singular: helmrepository + storedVersions: + - v1beta2 diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/image-repository-crd-assert.yaml 
b/flux-cel/verify-flux-sources/.chainsaw-test/image-repository-crd-assert.yaml new file mode 100755 index 000000000..51fc5cd50 --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/image-repository-crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: imagerepositories.image.toolkit.fluxcd.io +spec: {} +status: + acceptedNames: + kind: ImageRepository + listKind: ImageRepositoryList + plural: imagerepositories + singular: imagerepository + storedVersions: + - v1beta2 diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-4.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/ns.yaml similarity index 67% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-4.yaml rename to flux-cel/verify-flux-sources/.chainsaw-test/ns.yaml index 5a136cef6..c00a4321e 100755 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-4.yaml +++ b/flux-cel/verify-flux-sources/.chainsaw-test/ns.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: k10-gp-ns04 + name: flux-system diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/policy-ready.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..30e325f84 --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: verify-flux-sources +status: + ready: true diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-bucket.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-bucket.yaml new file mode 100644 index 000000000..0fb02ca4a --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-bucket.yaml @@ -0,0 +1,8 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: bucket-bad +spec: + interval: 5m0s + endpoint: minio.notmyorg.com + 
bucketName: example \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-git.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-git.yaml new file mode 100644 index 000000000..62998778b --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-git.yaml @@ -0,0 +1,7 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: bad-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/notmyorg/podinfo \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-helm.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-helm.yaml new file mode 100644 index 000000000..11a996a11 --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-helm.yaml @@ -0,0 +1,7 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: bad-helmrepo-01 +spec: + interval: 5m0s + url: https://helmrepo.github.io/podinfo \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-image.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-image.yaml new file mode 100644 index 000000000..2be404b43 --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/repo-bad-image.yaml @@ -0,0 +1,8 @@ +apiVersion: image.toolkit.fluxcd.io/v1beta2 +kind: ImageRepository +metadata: + name: imagerepo-bad +spec: + image: nothing.io/notmyorg/ + interval: 1h + provider: generic \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-bucket.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-bucket.yaml new file mode 100644 index 000000000..0669a4190 --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-bucket.yaml @@ -0,0 +1,18 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: good-bucket-01 +spec: + interval: 5m0s + endpoint: minio.myorg.com + bucketName: example +--- +apiVersion: 
source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: good-bucket-02 + namespace: flux-system +spec: + interval: 5m0s + endpoint: minio.notmyorg.com + bucketName: example \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-git.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-git.yaml new file mode 100644 index 000000000..e98df9e9c --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-git.yaml @@ -0,0 +1,24 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/myorg/podinfo +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-02 +spec: + interval: 5m0s + url: ssh://git@github.com:myorg/podinfo +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-03 + namespace: flux-system +spec: + interval: 5m0s + url: https://github.com/notmyorg/podinfo \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-helm.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-helm.yaml new file mode 100644 index 000000000..17b32fd4c --- /dev/null +++ b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-helm.yaml @@ -0,0 +1,16 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: good-helmrepo-01 +spec: + interval: 5m0s + url: https://helmrepo.myorg.com/podinfo +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: good-helmrepo-02 + namespace: flux-system +spec: + interval: 5m0s + url: https://notmyorg.github.io/podinfo \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-image.yaml b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-image.yaml new file mode 100644 index 000000000..beebfd1aa --- /dev/null +++ 
b/flux-cel/verify-flux-sources/.chainsaw-test/repo-good-image.yaml @@ -0,0 +1,18 @@ +apiVersion: image.toolkit.fluxcd.io/v1beta2 +kind: ImageRepository +metadata: + name: good-imagerepo-01 +spec: + image: ghcr.io/myorg/ + interval: 1h + provider: generic +--- +apiVersion: image.toolkit.fluxcd.io/v1beta2 +kind: ImageRepository +metadata: + name: good-imagerepo-02 + namespace: flux-system +spec: + image: nothing.io/notmyorg/ + interval: 1h + provider: generic \ No newline at end of file diff --git a/flux-cel/verify-flux-sources/.kyverno-test/kyverno-test.yaml b/flux-cel/verify-flux-sources/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e455f191c --- /dev/null +++ b/flux-cel/verify-flux-sources/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,66 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: verify-flux-sources +policies: +- ../verify-flux-sources.yaml +resources: +- ../.chainsaw-test/repo-bad-bucket.yaml +- ../.chainsaw-test/repo-bad-git.yaml +- ../.chainsaw-test/repo-bad-helm.yaml +- ../.chainsaw-test/repo-bad-image.yaml +- ../.chainsaw-test/repo-good-bucket.yaml +- ../.chainsaw-test/repo-good-git.yaml +- ../.chainsaw-test/repo-good-helm.yaml +- ../.chainsaw-test/repo-good-image.yaml +results: +- policy: verify-flux-sources + rule: flux-github-repositories + kind: GitRepository + resources: + - bad-gitrepo-01 + result: fail +- policy: verify-flux-sources + rule: flux-github-repositories + kind: GitRepository + resources: + - good-gitrepo-01 + - good-gitrepo-02 + result: pass +- policy: verify-flux-sources + rule: flux-buckets + kind: Bucket + resources: + - bucket-bad + result: fail +- policy: verify-flux-sources + rule: flux-buckets + kind: Bucket + resources: + - good-bucket-01 + result: pass +- policy: verify-flux-sources + rule: flux-helm-repositories + kind: HelmRepository + resources: + - bad-helmrepo-01 + result: fail +- policy: verify-flux-sources + rule: flux-helm-repositories + kind: HelmRepository + resources: + - 
good-helmrepo-01 + result: pass +- policy: verify-flux-sources + rule: flux-image-repositories + kind: ImageRepository + resources: + - imagerepo-bad + result: fail +- policy: verify-flux-sources + rule: flux-image-repositories + kind: ImageRepository + resources: + - good-imagerepo-01 + result: pass + diff --git a/flux-cel/verify-flux-sources/artifacthub-pkg.yml b/flux-cel/verify-flux-sources/artifacthub-pkg.yml new file mode 100644 index 000000000..31c2fedbe --- /dev/null +++ b/flux-cel/verify-flux-sources/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: verify-flux-sources-cel +version: 1.0.0 +displayName: Verify Flux Sources in CEL expressions +description: >- + Flux source APIs include a number of different sources such as GitRepository, Bucket, HelmRepository, and ImageRepository resources. Each of these by default can be pointed to any location. In a production environment, it may be desired to restrict these to only known sources to prevent accessing outside sources. This policy verifies that each of the Flux sources comes from a trusted location. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/flux-cel/verify-flux-sources/verify-flux-sources.yaml + ``` +keywords: + - kyverno + - Flux + - CEL Expressions +readme: | + Flux source APIs include a number of different sources such as GitRepository, Bucket, HelmRepository, and ImageRepository resources. Each of these by default can be pointed to any location. In a production environment, it may be desired to restrict these to only known sources to prevent accessing outside sources. This policy verifies that each of the Flux sources comes from a trusted location. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Flux in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "GitRepository, Bucket, HelmRepository, ImageRepository" +digest: bf6f3413334accaa083d0b203909b82f74b0131e862799124b940afd86e4372d +createdAt: "2024-05-11T15:02:04Z" + diff --git a/flux-cel/verify-flux-sources/verify-flux-sources.yaml b/flux-cel/verify-flux-sources/verify-flux-sources.yaml new file mode 100644 index 000000000..7c15ed2a8 --- /dev/null +++ b/flux-cel/verify-flux-sources/verify-flux-sources.yaml @@ -0,0 +1,99 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: verify-flux-sources + annotations: + policies.kyverno.io/title: Verify Flux Sources in CEL expressions + policies.kyverno.io/category: Flux in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: GitRepository, Bucket, HelmRepository, ImageRepository + policies.kyverno.io/description: >- + Flux source APIs include a number of different sources such as + GitRepository, Bucket, HelmRepository, and ImageRepository resources. Each of these + by default can be pointed to any location. In a production environment, + it may be desired to restrict these to only known sources to prevent + accessing outside sources. This policy verifies that each of the Flux + sources comes from a trusted location. 
+spec: + validationFailureAction: Audit + rules: + - name: flux-github-repositories + match: + any: + - resources: + kinds: + - GitRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "object.spec.url.startsWith('https://github.com/myorg/') || object.spec.url.startsWith('ssh://git@github.com:myorg/')" + message: ".spec.url must be from a repository within the myorg organization." + - name: flux-buckets + match: + any: + - resources: + kinds: + - Bucket + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "has(object.spec.endpoint) && object.spec.endpoint.endsWith('.myorg.com')" + message: ".spec.endpoint must reference an address within the myorg organization." + - name: flux-helm-repositories + match: + any: + - resources: + kinds: + - HelmRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "object.spec.url.matches('^https://[a-zA-Z0-9-]+[.]myorg[.]com/.*$')" + message: ".spec.url must be from a repository within the myorg organization." + - name: flux-image-repositories + match: + any: + - resources: + kinds: + - ImageRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "has(object.spec.image) && object.spec.image.startsWith('ghcr.io/myorg/')" + message: ".spec.image must be from an image repository within the myorg organization." 
+ diff --git a/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/bad-gitrepositories.yaml b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/bad-gitrepositories.yaml new file mode 100644 index 000000000..035895270 --- /dev/null +++ b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/bad-gitrepositories.yaml @@ -0,0 +1,15 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: bad-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/kyverno/foo +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: bad-gitrepo-02 +spec: + interval: 5m0s + url: ssh://git@github.com:kyverno/bar \ No newline at end of file diff --git a/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/bad.yaml b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/bad.yaml new file mode 100644 index 000000000..035895270 --- /dev/null +++ b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/bad.yaml @@ -0,0 +1,15 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: bad-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/kyverno/foo +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: bad-gitrepo-02 +spec: + interval: 5m0s + url: ssh://git@github.com:kyverno/bar \ No newline at end of file diff --git a/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/chainsaw-test-rename-after-issue-10313-fix.yaml b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/chainsaw-test-rename-after-issue-10313-fix.yaml new file mode 100644 index 000000000..43dbc87b0 --- /dev/null +++ b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/chainsaw-test-rename-after-issue-10313-fix.yaml @@ -0,0 +1,31 @@ +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test 
+metadata: + name: verify-git-repositories +spec: + steps: + - name: 01 - Create policy and verify + try: + - apply: + file: ../verify-git-repositories.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: verify-git-repositories + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: 02 - Create good GitRepository + try: + - apply: + file: good-gitrepositories.yaml + - name: 03 - Create bad GitRepository + try: + - apply: + file: bad-gitrepositories.yaml + expect: + - check: + ($error != null): true diff --git a/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/good-gitrepositories.yaml b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/good-gitrepositories.yaml new file mode 100644 index 000000000..e4ef8599c --- /dev/null +++ b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/good-gitrepositories.yaml @@ -0,0 +1,15 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/fluxcd/foo +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-02 +spec: + interval: 5m0s + url: ssh://git@github.com:fluxcd/bar \ No newline at end of file diff --git a/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/good.yaml b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/good.yaml new file mode 100644 index 000000000..e4ef8599c --- /dev/null +++ b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/good.yaml @@ -0,0 +1,15 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/fluxcd/foo +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-02 +spec: + interval: 5m0s + url: 
ssh://git@github.com:fluxcd/bar \ No newline at end of file diff --git a/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/policy-ready.yaml b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/policy-ready.yaml new file mode 100644 index 000000000..7cf04ed5a --- /dev/null +++ b/flux-cel/verify-git-repositories/.chainsaw-test-rename-after-issue-10313-fix/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: verify-git-repositories +status: + ready: true \ No newline at end of file diff --git a/flux-cel/verify-git-repositories/.kyverno-test/kyverno-test.yaml b/flux-cel/verify-git-repositories/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e15021fd6 --- /dev/null +++ b/flux-cel/verify-git-repositories/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,25 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: verify-git-repositories +policies: +- ../verify-git-repositories.yaml +resources: +- ../.chainsaw-test-rename-after-issue-10313-fix/good-gitrepositories.yaml +- ../.chainsaw-test-rename-after-issue-10313-fix/bad-gitrepositories.yaml +results: +- policy: verify-git-repositories + rule: github-repositories-only + kind: GitRepository + resources: + - bad-gitrepo-01 + - bad-gitrepo-02 + result: fail +- policy: verify-git-repositories + rule: github-repositories-only + kind: GitRepository + resources: + - good-gitrepo-01 + - good-gitrepo-02 + result: pass + diff --git a/flux-cel/verify-git-repositories/artifacthub-pkg.yml b/flux-cel/verify-git-repositories/artifacthub-pkg.yml new file mode 100644 index 000000000..bdf7ba603 --- /dev/null +++ b/flux-cel/verify-git-repositories/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: verify-git-repositories-cel +version: 1.0.0 +displayName: Verify Git Repositories in CEL expressions +description: >- + Ensures that Git repositories used for Flux deployments in a cluster originate from a specific, trusted 
organization. Prevents the use of untrusted or potentially risky Git repositories. Protects the integrity and security of Flux deployments. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/flux-cel/verify-git-repositories/verify-git-repositories.yaml + ``` +keywords: + - kyverno + - Flux + - CEL Expressions +readme: | + Ensures that Git repositories used for Flux deployments in a cluster originate from a specific, trusted organization. Prevents the use of untrusted or potentially risky Git repositories. Protects the integrity and security of Flux deployments. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Flux in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "GitRepository" +digest: afbbe3a747cf36d5a83bdc425a9a07c2f2da857187aaf8443d74947cb4920926 +createdAt: "2024-05-11T15:08:13Z" + diff --git a/flux-cel/verify-git-repositories/verify-git-repositories.yaml b/flux-cel/verify-git-repositories/verify-git-repositories.yaml new file mode 100644 index 000000000..5945961f5 --- /dev/null +++ b/flux-cel/verify-git-repositories/verify-git-repositories.yaml @@ -0,0 +1,39 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: verify-git-repositories + annotations: + policies.kyverno.io/title: Verify Git Repositories in CEL expressions + policies.kyverno.io/category: Flux in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: GitRepository + policies.kyverno.io/description: >- + Ensures that Git repositories used for Flux deployments + in a cluster originate from a specific, trusted organization. + Prevents the use of untrusted or potentially risky Git repositories. + Protects the integrity and security of Flux deployments. 
+spec: + validationFailureAction: Audit + rules: + - name: github-repositories-only + match: + any: + - resources: + kinds: + - GitRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "object.spec.url.startsWith('https://github.com/fluxcd/') || object.spec.url.startsWith('ssh://git@github.com:fluxcd/')" + message: .spec.url must be from a repository within the organisation X + diff --git a/flux/verify-flux-sources/.chainsaw-test/repo-bad-git.yaml b/flux/verify-flux-sources/.chainsaw-test/repo-bad-git.yaml index cfaaa89f2..62998778b 100644 --- a/flux/verify-flux-sources/.chainsaw-test/repo-bad-git.yaml +++ b/flux/verify-flux-sources/.chainsaw-test/repo-bad-git.yaml @@ -1,7 +1,7 @@ apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: - name: gitrepo-01 + name: bad-gitrepo-01 spec: interval: 5m0s url: https://github.com/notmyorg/podinfo \ No newline at end of file diff --git a/flux/verify-flux-sources/.chainsaw-test/repo-bad-helm.yaml b/flux/verify-flux-sources/.chainsaw-test/repo-bad-helm.yaml index aa017599f..11a996a11 100644 --- a/flux/verify-flux-sources/.chainsaw-test/repo-bad-helm.yaml +++ b/flux/verify-flux-sources/.chainsaw-test/repo-bad-helm.yaml @@ -1,7 +1,7 @@ apiVersion: source.toolkit.fluxcd.io/v1beta2 kind: HelmRepository metadata: - name: helmrepo-03 + name: bad-helmrepo-01 spec: interval: 5m0s url: https://helmrepo.github.io/podinfo \ No newline at end of file diff --git a/flux/verify-flux-sources/.chainsaw-test/repo-good-bucket.yaml b/flux/verify-flux-sources/.chainsaw-test/repo-good-bucket.yaml index b9bedb513..0669a4190 100644 --- a/flux/verify-flux-sources/.chainsaw-test/repo-good-bucket.yaml +++ b/flux/verify-flux-sources/.chainsaw-test/repo-good-bucket.yaml @@ -1,7 +1,7 @@ apiVersion: source.toolkit.fluxcd.io/v1beta2 kind: Bucket metadata: - name: bucket-01 + name: good-bucket-01 spec: interval: 5m0s endpoint: 
minio.myorg.com @@ -10,7 +10,7 @@ spec: apiVersion: source.toolkit.fluxcd.io/v1beta2 kind: Bucket metadata: - name: bucket-02 + name: good-bucket-02 namespace: flux-system spec: interval: 5m0s diff --git a/flux/verify-flux-sources/.chainsaw-test/repo-good-git.yaml b/flux/verify-flux-sources/.chainsaw-test/repo-good-git.yaml index 8178ad444..e98df9e9c 100644 --- a/flux/verify-flux-sources/.chainsaw-test/repo-good-git.yaml +++ b/flux/verify-flux-sources/.chainsaw-test/repo-good-git.yaml @@ -1,7 +1,7 @@ apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: - name: gitrepo-01 + name: good-gitrepo-01 spec: interval: 5m0s url: https://github.com/myorg/podinfo @@ -9,7 +9,7 @@ spec: apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: - name: gitrepo-02 + name: good-gitrepo-02 spec: interval: 5m0s url: ssh://git@github.com:myorg/podinfo @@ -17,7 +17,7 @@ spec: apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: - name: gitrepo-03 + name: good-gitrepo-03 namespace: flux-system spec: interval: 5m0s diff --git a/flux/verify-flux-sources/.chainsaw-test/repo-good-helm.yaml b/flux/verify-flux-sources/.chainsaw-test/repo-good-helm.yaml index a1b57a1c4..17b32fd4c 100644 --- a/flux/verify-flux-sources/.chainsaw-test/repo-good-helm.yaml +++ b/flux/verify-flux-sources/.chainsaw-test/repo-good-helm.yaml @@ -1,7 +1,7 @@ apiVersion: source.toolkit.fluxcd.io/v1beta2 kind: HelmRepository metadata: - name: helmrepo-01 + name: good-helmrepo-01 spec: interval: 5m0s url: https://helmrepo.myorg.com/podinfo @@ -9,7 +9,7 @@ spec: apiVersion: source.toolkit.fluxcd.io/v1beta2 kind: HelmRepository metadata: - name: helmrepo-02 + name: good-helmrepo-02 namespace: flux-system spec: interval: 5m0s diff --git a/flux/verify-flux-sources/.chainsaw-test/repo-good-image.yaml b/flux/verify-flux-sources/.chainsaw-test/repo-good-image.yaml index a4075d163..beebfd1aa 100644 --- a/flux/verify-flux-sources/.chainsaw-test/repo-good-image.yaml +++ 
b/flux/verify-flux-sources/.chainsaw-test/repo-good-image.yaml @@ -1,7 +1,7 @@ apiVersion: image.toolkit.fluxcd.io/v1beta2 kind: ImageRepository metadata: - name: imagerepo-01 + name: good-imagerepo-01 spec: image: ghcr.io/myorg/ interval: 1h @@ -10,7 +10,7 @@ spec: apiVersion: image.toolkit.fluxcd.io/v1beta2 kind: ImageRepository metadata: - name: imagerepo-02 + name: good-imagerepo-02 namespace: flux-system spec: image: nothing.io/notmyorg/ diff --git a/flux/verify-flux-sources/.kyverno-test/kyverno-test.yaml b/flux/verify-flux-sources/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e455f191c --- /dev/null +++ b/flux/verify-flux-sources/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,66 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: verify-flux-sources +policies: +- ../verify-flux-sources.yaml +resources: +- ../.chainsaw-test/repo-bad-bucket.yaml +- ../.chainsaw-test/repo-bad-git.yaml +- ../.chainsaw-test/repo-bad-helm.yaml +- ../.chainsaw-test/repo-bad-image.yaml +- ../.chainsaw-test/repo-good-bucket.yaml +- ../.chainsaw-test/repo-good-git.yaml +- ../.chainsaw-test/repo-good-helm.yaml +- ../.chainsaw-test/repo-good-image.yaml +results: +- policy: verify-flux-sources + rule: flux-github-repositories + kind: GitRepository + resources: + - bad-gitrepo-01 + result: fail +- policy: verify-flux-sources + rule: flux-github-repositories + kind: GitRepository + resources: + - good-gitrepo-01 + - good-gitrepo-02 + result: pass +- policy: verify-flux-sources + rule: flux-buckets + kind: Bucket + resources: + - bucket-bad + result: fail +- policy: verify-flux-sources + rule: flux-buckets + kind: Bucket + resources: + - good-bucket-01 + result: pass +- policy: verify-flux-sources + rule: flux-helm-repositories + kind: HelmRepository + resources: + - bad-helmrepo-01 + result: fail +- policy: verify-flux-sources + rule: flux-helm-repositories + kind: HelmRepository + resources: + - good-helmrepo-01 + result: pass +- policy: 
verify-flux-sources + rule: flux-image-repositories + kind: ImageRepository + resources: + - imagerepo-bad + result: fail +- policy: verify-flux-sources + rule: flux-image-repositories + kind: ImageRepository + resources: + - good-imagerepo-01 + result: pass + diff --git a/flux/verify-git-repositories/.chainsaw-test/bad-gitrepositories.yaml b/flux/verify-git-repositories/.chainsaw-test/bad-gitrepositories.yaml new file mode 100644 index 000000000..035895270 --- /dev/null +++ b/flux/verify-git-repositories/.chainsaw-test/bad-gitrepositories.yaml @@ -0,0 +1,15 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: bad-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/kyverno/foo +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: bad-gitrepo-02 +spec: + interval: 5m0s + url: ssh://git@github.com:kyverno/bar \ No newline at end of file diff --git a/flux/verify-git-repositories/.chainsaw-test/chainsaw-test.yaml b/flux/verify-git-repositories/.chainsaw-test/chainsaw-test.yaml index d04a20a30..43dbc87b0 100644 --- a/flux/verify-git-repositories/.chainsaw-test/chainsaw-test.yaml +++ b/flux/verify-git-repositories/.chainsaw-test/chainsaw-test.yaml @@ -21,11 +21,11 @@ spec: - name: 02 - Create good GitRepository try: - apply: - file: good.yaml + file: good-gitrepositories.yaml - name: 03 - Create bad GitRepository try: - apply: - file: bad.yaml + file: bad-gitrepositories.yaml expect: - check: ($error != null): true diff --git a/flux/verify-git-repositories/.chainsaw-test/good-gitrepositories.yaml b/flux/verify-git-repositories/.chainsaw-test/good-gitrepositories.yaml new file mode 100644 index 000000000..e4ef8599c --- /dev/null +++ b/flux/verify-git-repositories/.chainsaw-test/good-gitrepositories.yaml @@ -0,0 +1,15 @@ +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-01 +spec: + interval: 5m0s + url: https://github.com/fluxcd/foo +--- +apiVersion: 
source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: good-gitrepo-02 +spec: + interval: 5m0s + url: ssh://git@github.com:fluxcd/bar \ No newline at end of file diff --git a/flux/verify-git-repositories/.kyverno-test/kyverno-test.yaml b/flux/verify-git-repositories/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..91bae26eb --- /dev/null +++ b/flux/verify-git-repositories/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,25 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: verify-git-repositories +policies: +- ../verify-git-repositories.yaml +resources: +- ../.chainsaw-test/good-gitrepositories.yaml +- ../.chainsaw-test/bad-gitrepositories.yaml +results: +- policy: verify-git-repositories + rule: github-repositories-only + kind: GitRepository + resources: + - bad-gitrepo-01 + - bad-gitrepo-02 + result: fail +- policy: verify-git-repositories + rule: github-repositories-only + kind: GitRepository + resources: + - good-gitrepo-01 + - good-gitrepo-02 + result: pass + diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/chainsaw-test.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..c160abb64 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,41 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: enforce-sidecar-injection-namespace +spec: + steps: + - name: step-01 + try: + - apply: + file: ../enforce-sidecar-injection-namespace.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: enforce-sidecar-injection-namespace + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: 
ns-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-disabled.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-nolabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-somelabel.yaml diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-disabled.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-disabled.yaml new file mode 100644 index 000000000..0eec7ea44 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-disabled.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio-injection: disabled + name: bad-istio-sinj01 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-nolabel.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-nolabel.yaml new file mode 100644 index 000000000..4caa0efdb --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-nolabel.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: bad-istio-sinj03 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-somelabel.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-somelabel.yaml new file mode 100644 index 000000000..d25585d2a --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-bad-somelabel.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: enabled + name: bad-istio-sinj02 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-good.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-good.yaml new file mode 100644 index 000000000..a5f30d2ac --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/ns-good.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 
+kind: Namespace +metadata: + labels: + istio-injection: enabled + name: good-istio-sinj01 +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: disabled + istio-injection: enabled + bar: enabled + name: good-istio-sinj02 \ No newline at end of file diff --git a/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/policy-ready.yaml b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..acc3f29fb --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-sidecar-injection-namespace +status: + ready: true diff --git a/istio-cel/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml b/istio-cel/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e457fa2b8 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-sidecar-injection-namespace +policies: +- ../enforce-sidecar-injection-namespace.yaml +resources: +- ../.chainsaw-test/ns-bad-disabled.yaml +- ../.chainsaw-test/ns-bad-nolabel.yaml +- ../.chainsaw-test/ns-bad-somelabel.yaml +- ../.chainsaw-test/ns-good.yaml +results: +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - bad-istio-sinj01 + - bad-istio-sinj02 + - bad-istio-sinj03 + result: fail +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - good-istio-sinj01 + - good-istio-sinj02 + result: pass + diff --git a/istio-cel/enforce-sidecar-injection-namespace/artifacthub-pkg.yml b/istio-cel/enforce-sidecar-injection-namespace/artifacthub-pkg.yml new file mode 100644 index 000000000..177e3d150 --- /dev/null +++ 
b/istio-cel/enforce-sidecar-injection-namespace/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: enforce-sidecar-injection-namespace-cel +version: 1.0.0 +displayName: Enforce Istio Sidecar Injection in CEL expressions +description: >- + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces set `istio-injection` to `enabled`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml + ``` +keywords: + - kyverno + - Istio + - CEL Expressions +readme: | + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces set `istio-injection` to `enabled`. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace" +digest: 123feb2a8d1b2743e33b1f91ddf7291c47eedcf2c24ae537a1d3afe6c503338d +createdAt: "2024-05-12T04:38:32Z" + diff --git a/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml b/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml new file mode 100644 index 000000000..5a2c91d80 --- /dev/null +++ b/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml @@ -0,0 +1,34 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-sidecar-injection-namespace + annotations: + policies.kyverno.io/title: Enforce Istio Sidecar Injection in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + 
kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Namespace + policies.kyverno.io/description: >- + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label + `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces + set `istio-injection` to `enabled`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-istio-injection-enabled + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && 'istio-injection' in object.metadata.labels && object.metadata.labels['istio-injection'] == 'enabled'" + message: "All new Namespaces must have Istio sidecar injection enabled." + diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/chainsaw-test.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..e547cafa5 --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: enforce-strict-mtls +spec: + steps: + - name: step-01 + try: + - apply: + file: ../enforce-strict-mtls.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: enforce-strict-mtls + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: pa-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pa-bad.yaml diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/crd-assert.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..56561a629 --- /dev/null +++ 
b/istio-cel/enforce-strict-mtls/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: peerauthentications.security.istio.io +spec: {} +status: + acceptedNames: + kind: PeerAuthentication + listKind: PeerAuthenticationList + plural: peerauthentications + singular: peerauthentication + storedVersions: + - v1beta1 diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-bad.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-bad.yaml new file mode 100644 index 000000000..771d21f3d --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-bad.yaml @@ -0,0 +1,26 @@ +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: pa-bad01 +spec: + mtls: + mode: PERMISSIVE +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: pa-bad02 +spec: + mtls: + mode: DISABLE +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: pa-bad03 +spec: + selector: + matchLabels: + app: finance + mtls: + mode: DISABLE \ No newline at end of file diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-good.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-good.yaml new file mode 100644 index 000000000..0d2d9d383 --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/pa-good.yaml @@ -0,0 +1,39 @@ +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa01 +spec: + mtls: + mode: STRICT +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa02 +spec: + mtls: + mode: UNSET +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa03 +spec: {} +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: good-pa04 +spec: + selector: + matchLabels: + app: finance + mtls: + mode: STRICT +--- +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication 
+metadata: + name: good-pa05 +spec: + mtls: {} \ No newline at end of file diff --git a/istio-cel/enforce-strict-mtls/.chainsaw-test/policy-ready.yaml b/istio-cel/enforce-strict-mtls/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..c5f7637cb --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-strict-mtls +status: + ready: true diff --git a/istio-cel/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml b/istio-cel/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..f4018437a --- /dev/null +++ b/istio-cel/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,29 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-strict-mtls +policies: +- ../enforce-strict-mtls.yaml +resources: +- ../.chainsaw-test/pa-bad.yaml +- ../.chainsaw-test/pa-good.yaml +results: +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - pa-bad01 + - pa-bad02 + - pa-bad03 + result: fail +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - good-pa01 + - good-pa02 + - good-pa03 + - good-pa04 + - good-pa05 + result: pass + diff --git a/istio-cel/enforce-strict-mtls/artifacthub-pkg.yml b/istio-cel/enforce-strict-mtls/artifacthub-pkg.yml new file mode 100644 index 000000000..e760f1c9e --- /dev/null +++ b/istio-cel/enforce-strict-mtls/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: enforce-strict-mtls-cel +version: 1.0.0 +displayName: Enforce Istio Strict mTLS in CEL expressions +description: >- + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on the `istio-system` Namespace could disable it across the entire mesh. 
Disabling mTLS can reduce the security for traffic within that portion of the mesh and should be controlled. This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring the `mode` be set to either `UNSET` or `STRICT`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml + ``` +keywords: + - kyverno + - Istio + - CEL Expressions +readme: | + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on the `istio-system` Namespace could disable it across the entire mesh. Disabling mTLS can reduce the security for traffic within that portion of the mesh and should be controlled. This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring the `mode` be set to either `UNSET` or `STRICT`. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "PeerAuthentication" +digest: 6bf5af52d9187ac5b1b90879ab3975ea618b38d04928ceecd4779fc2b2e4b26a +createdAt: "2024-05-12T04:41:47Z" + diff --git a/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml b/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml new file mode 100644 index 000000000..33747bbfd --- /dev/null +++ b/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml @@ -0,0 +1,40 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-strict-mtls + annotations: + policies.kyverno.io/title: Enforce Istio Strict mTLS in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: PeerAuthentication + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + 
kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which + can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on + the `istio-system` Namespace could disable it across the entire mesh. Disabling mTLS + can reduce the security for traffic within that portion of the mesh and should be controlled. + This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring + the `mode` be set to either `UNSET` or `STRICT`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-mtls + match: + any: + - resources: + kinds: + - PeerAuthentication + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec) || !has(object.spec.mtls) || !has(object.spec.mtls.mode) || + object.spec.mtls.mode in ['UNSET', 'STRICT'] + message: "PeerAuthentication resources may only set UNSET or STRICT for the mode." 
+ diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/chainsaw-test.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..a1695faa6 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: prevent-disabling-injection-pods +spec: + steps: + - name: step-01 + try: + - apply: + file: ../prevent-disabling-injection-pods.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: prevent-disabling-injection-pods + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..3f2e6b492 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + app.k8s.io/name: badpod01 + sidecar.istio.io/inject: "false" + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + sidecar.istio.io/inject: "false" + app.k8s.io/name: badpod02 + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git 
a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-good.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..de1e03c32 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/pod-good.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + app.k8s.io/name: goodpod01 + sidecar.istio.io/inject: "true" + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + annotations: + app.k8s.io/name: goodpod02 + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..90c90ffc0 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + app.k8s.io/name: busybox + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + annotations: + app.k8s.io/name: busybox + sidecar.istio.io/inject: "false" + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + app.k8s.io/name: busybox + sidecar.istio.io/inject: 
"false" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - sleep + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + app.k8s.io/name: busybox + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - sleep + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..59d1afa1b --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + annotations: + app.k8s.io/name: busybox + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + annotations: + app.k8s.io/name: busybox + sidecar.istio.io/inject: "true" + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + app.k8s.io/name: busybox + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + 
app.k8s.io/name: busybox + sidecar.istio.io/inject: "true" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/policy-ready.yaml b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..4c6866bd0 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-disabling-injection-pods +status: + ready: true diff --git a/istio-cel/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml b/istio-cel/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..5aa5be9ad --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,55 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-disabling-injection-pods +policies: +- ../prevent-disabling-injection-pods.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - badpod01 + - badpod02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Deployment + resources: + - baddeploy01 + - baddeploy02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - badcronjob01 + - badcronjob02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-disabling-injection-pods + rule: 
prohibit-inject-annotation + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass + diff --git a/istio-cel/prevent-disabling-injection-pods/artifacthub-pkg.yml b/istio-cel/prevent-disabling-injection-pods/artifacthub-pkg.yml new file mode 100644 index 000000000..36ec09a25 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: prevent-disabling-injection-pods-cel +version: 1.0.0 +displayName: Prevent Disabling Istio Sidecar Injection in CEL expressions +description: >- + One way sidecar injection in an Istio service mesh may be accomplished is by defining an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh thereby reducing visibility. This policy ensures that Pods cannot set the annotation `sidecar.istio.io/inject` to a value of `false`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml + ``` +keywords: + - kyverno + - Istio + - CEL Expressions +readme: | + One way sidecar injection in an Istio service mesh may be accomplished is by defining an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh thereby reducing visibility. This policy ensures that Pods cannot set the annotation `sidecar.istio.io/inject` to a value of `false`. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 5de03c078273ce913a6ebf9064a85be4255b82e36f74bda822984e261363fe8b +createdAt: "2024-05-12T04:48:58Z" + diff --git a/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml b/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml new file mode 100644 index 000000000..6662e5151 --- /dev/null +++ b/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml @@ -0,0 +1,38 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-disabling-injection-pods + annotations: + policies.kyverno.io/title: Prevent Disabling Istio Sidecar Injection in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + One way sidecar injection in an Istio service mesh may be accomplished is by defining + an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh + thereby reducing visibility. This policy ensures that Pods cannot set the annotation + `sidecar.istio.io/inject` to a value of `false`. 
+spec: + validationFailureAction: Audit + background: true + rules: + - name: prohibit-inject-annotation + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.metadata.annotations) || !('sidecar.istio.io/inject' in object.metadata.annotations) || + object.metadata.annotations['sidecar.istio.io/inject'] != 'false' + message: "Pods may not disable sidecar injection by setting the annotation sidecar.istio.io/inject to a value of false." + diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-1.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-1.yaml new file mode 100644 index 000000000..71651310f --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-1.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio.io/dataplane-mode: ambient + name: istio-test-en-ns diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-2.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-2.yaml new file mode 100644 index 000000000..32cbd8936 --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-2.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio.io/dataplane-mode: other + name: istio-test-dis-ns diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-3.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-3.yaml new file mode 100644 index 000000000..6b17ee831 --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-3.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: istio-test-none-ns diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-4.yaml 
b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-4.yaml new file mode 100644 index 000000000..7b14de9b6 --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-step-02-apply-4.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: bar + name: istio-test-alt-ns diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-test.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-test.yaml new file mode 100644 index 000000000..51c8ca8fb --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,34 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: add-ambient-mode-namespace +spec: + steps: + - name: step-01 + try: + - apply: + file: ../add-ambient-mode-namespace.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: chainsaw-step-02-apply-1.yaml + - apply: + file: chainsaw-step-02-apply-2.yaml + - apply: + file: chainsaw-step-02-apply-3.yaml + - apply: + file: chainsaw-step-02-apply-4.yaml + - name: step-03 + try: + - assert: + file: patched-ns-alt.yaml + - assert: + file: patched-ns-disabled.yaml + - assert: + file: patched-ns-enabled.yaml + - assert: + file: patched-ns-none.yaml diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-alt.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-alt.yaml new file mode 100644 index 000000000..7ad1fb2fe --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-alt.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: bar + istio.io/dataplane-mode: ambient + name: istio-test-alt-ns \ No newline at end of file diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-disabled.yaml 
b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-disabled.yaml new file mode 100644 index 000000000..95de97e29 --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-disabled.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio.io/dataplane-mode: ambient + name: istio-test-dis-ns \ No newline at end of file diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-enabled.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-enabled.yaml new file mode 100644 index 000000000..ee122e92b --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-enabled.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio.io/dataplane-mode: ambient + name: istio-test-en-ns \ No newline at end of file diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-none.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-none.yaml new file mode 100644 index 000000000..c13793cf5 --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/patched-ns-none.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio.io/dataplane-mode: ambient + name: istio-test-none-ns \ No newline at end of file diff --git a/istio/add-ambient-mode-namespace/.chainsaw-test/policy-ready.yaml b/istio/add-ambient-mode-namespace/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..12870b244 --- /dev/null +++ b/istio/add-ambient-mode-namespace/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-ambient-mode-namespace +status: + ready: true \ No newline at end of file diff --git a/istio/add-ambient-mode-namespace/.kyverno-test/kyverno-test.yaml b/istio/add-ambient-mode-namespace/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..1b1fa13e0 --- /dev/null +++ 
b/istio/add-ambient-mode-namespace/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: add-ambient-mode-namespace +policies: +- ../add-ambient-mode-namespace.yaml +resources: +- ../.chainsaw-test/patched-ns-disabled.yaml +- ../.chainsaw-test/patched-ns-enabled.yaml +- ../.chainsaw-test/patched-ns-alt.yaml +- ../.chainsaw-test/patched-ns-none.yaml +results: +- policy: add-ambient-mode-namespace + rule: add-ambient-mode-enabled + kind: Namespace + resources: + - istio-test-none-ns + - istio-test-dis-ns + - istio-test-en-ns + - istio-test-alt-ns + result: pass diff --git a/istio/add-ambient-mode-namespace/add-ambient-mode-namespace.yaml b/istio/add-ambient-mode-namespace/add-ambient-mode-namespace.yaml new file mode 100644 index 000000000..df5fd0992 --- /dev/null +++ b/istio/add-ambient-mode-namespace/add-ambient-mode-namespace.yaml @@ -0,0 +1,30 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-ambient-mode-namespace + annotations: + policies.kyverno.io/title: Add Istio Ambient Mode + policies.kyverno.io/category: Istio + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.8.0 + policies.kyverno.io/minversion: 1.6.0 + kyverno.io/kubernetes-version: "1.24" + policies.kyverno.io/subject: Namespace + policies.kyverno.io/description: >- + In order for Istio to include namespaces in ambient mode, the label `istio.io/dataplane-mode` + must be set to `ambient`. As an alternative to rejecting Namespace definitions which don't already + contain this label, it can be added automatically. This policy adds the label `istio.io/dataplane-mode` + set to `ambient` for all new Namespaces. 
+spec: + rules: + - name: add-ambient-mode-enabled + match: + any: + - resources: + kinds: + - Namespace + mutate: + patchStrategicMerge: + metadata: + labels: + istio.io/dataplane-mode: ambient diff --git a/istio/add-ambient-mode-namespace/artifacthub-pkg.yml b/istio/add-ambient-mode-namespace/artifacthub-pkg.yml new file mode 100644 index 000000000..7d3226555 --- /dev/null +++ b/istio/add-ambient-mode-namespace/artifacthub-pkg.yml @@ -0,0 +1,22 @@ +name: add-ambient-mode-namespace +version: 1.0.0 +displayName: Add Istio Ambient Mode +createdAt: "2024-07-25T20:07:52.000Z" +description: >- + In order for Istio to include namespaces in ambient mode, the label `istio.io/dataplane-mode` must be set to `ambient`. As an alternative to rejecting Namespace definitions which don't already contain this label, it can be added automatically. This policy adds the label `istio.io/dataplane-mode` set to `ambient` for all new Namespaces. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio/add-ambient-mode-namespace/add-ambient-mode-namespace.yaml + ``` +keywords: + - kyverno + - Istio +readme: | + In order for Istio to include namespaces in ambient mode, the label `istio.io/dataplane-mode` must be set to `ambient`. As an alternative to rejecting Namespace definitions which don't already contain this label, it can be added automatically. This policy adds the label `istio.io/dataplane-mode` set to `ambient` for all new Namespaces. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio" + kyverno/kubernetesVersion: "1.24" + kyverno/subject: "Namespace" +digest: f81b9ba15c410e62589f0bf79b22a694b41a2294557c91d3c87683772922a8c0 diff --git a/istio/enforce-ambient-mode-namespace/.chainsaw-test/chainsaw-step-01-assert-1.yaml b/istio/enforce-ambient-mode-namespace/.chainsaw-test/chainsaw-step-01-assert-1.yaml new file mode 100644 index 000000000..5e9e5da0c --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/.chainsaw-test/chainsaw-step-01-assert-1.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-ambient-mode-namespace +status: + ready: true diff --git a/istio/enforce-ambient-mode-namespace/.chainsaw-test/chainsaw-test.yaml b/istio/enforce-ambient-mode-namespace/.chainsaw-test/chainsaw-test.yaml new file mode 100644 index 000000000..ba6b3d82f --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,41 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: enforce-ambient-mode-namespace +spec: + steps: + - name: step-01 + try: + - apply: + file: ../enforce-ambient-mode-namespace.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: enforce-ambient-mode-namespace + spec: + validationFailureAction: Enforce + - assert: + file: chainsaw-step-01-assert-1.yaml + - name: step-02 + try: + - apply: + file: ns-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-disabled.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-nolabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad-somelabel.yaml diff --git 
a/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-disabled.yaml b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-disabled.yaml new file mode 100644 index 000000000..0915ecd8e --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-disabled.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio.io/dataplane-mode: other + name: bad-istio-amb01 \ No newline at end of file diff --git a/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-nolabel.yaml b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-nolabel.yaml new file mode 100644 index 000000000..50c60d84f --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-nolabel.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: bad-istio-amb03 \ No newline at end of file diff --git a/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-somelabel.yaml b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-somelabel.yaml new file mode 100644 index 000000000..d18925001 --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-bad-somelabel.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: enabled + name: bad-istio-amb02 \ No newline at end of file diff --git a/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-good.yaml b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-good.yaml new file mode 100644 index 000000000..7520123b5 --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/.chainsaw-test/ns-good.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio.io/dataplane-mode: ambient + name: good-istio-amb01 +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: disabled + istio.io/dataplane-mode: ambient + bar: enabled + name: good-istio-amb02 \ No newline at end of file diff --git a/istio/enforce-ambient-mode-namespace/.kyverno-test/kyverno-test.yaml 
b/istio/enforce-ambient-mode-namespace/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e2e458be1 --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-ambient-mode-namespace +policies: +- ../enforce-ambient-mode-namespace.yaml +resources: +- ../.chainsaw-test/ns-bad-disabled.yaml +- ../.chainsaw-test/ns-bad-nolabel.yaml +- ../.chainsaw-test/ns-bad-somelabel.yaml +- ../.chainsaw-test/ns-good.yaml +results: +- policy: enforce-ambient-mode-namespace + rule: check-ambient-mode-enabled + kind: Namespace + resources: + - bad-istio-amb01 + - bad-istio-amb02 + - bad-istio-amb03 + result: fail +- policy: enforce-ambient-mode-namespace + rule: check-ambient-mode-enabled + kind: Namespace + resources: + - good-istio-amb01 + - good-istio-amb02 + result: pass + diff --git a/istio/enforce-ambient-mode-namespace/artifacthub-pkg.yml b/istio/enforce-ambient-mode-namespace/artifacthub-pkg.yml new file mode 100644 index 000000000..a01e95c16 --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/artifacthub-pkg.yml @@ -0,0 +1,22 @@ +name: enforce-ambient-mode-namespace +version: 1.0.0 +displayName: Enforce Istio Ambient Mode +createdAt: "2024-07-25T20:07:52.000Z" +description: >- + In order for Istio to include namespaces in ambient mode, the label `istio.io/dataplane-mode` must be set to `ambient`. This policy ensures that all new Namespaces set `istio.io/dataplane-mode` to `ambient`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/istio/enforce-ambient-mode-namespace/enforce-ambient-mode-namespace.yaml + ``` +keywords: + - kyverno + - Istio +readme: | + In order for Istio to include namespaces in ambient mode, the label `istio.io/dataplane-mode` must be set to `ambient`. This policy ensures that all new Namespaces set `istio.io/dataplane-mode` to `ambient`. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Istio" + kyverno/kubernetesVersion: "1.24" + kyverno/subject: "Namespace" +digest: 1d0f6644ba09afd6fe0dcb431b434c509b995580a5fef2f795df2fc979c6a931 diff --git a/istio/enforce-ambient-mode-namespace/enforce-ambient-mode-namespace.yaml b/istio/enforce-ambient-mode-namespace/enforce-ambient-mode-namespace.yaml new file mode 100644 index 000000000..0428f52c5 --- /dev/null +++ b/istio/enforce-ambient-mode-namespace/enforce-ambient-mode-namespace.yaml @@ -0,0 +1,32 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-ambient-mode-namespace + annotations: + policies.kyverno.io/title: Enforce Istio Ambient Mode + policies.kyverno.io/category: Istio + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.8.0 + policies.kyverno.io/minversion: 1.6.0 + kyverno.io/kubernetes-version: "1.24" + policies.kyverno.io/subject: Namespace + policies.kyverno.io/description: >- + In order for Istio to include namespaces in ambient mode, the label + `istio.io/dataplane-mode` must be set to `ambient`. This policy ensures that all new Namespaces + set `istio.io/dataplane-mode` to `ambient`. +spec: + validationFailureAction: audit + background: true + rules: + - name: check-ambient-mode-enabled + match: + any: + - resources: + kinds: + - Namespace + validate: + message: "All new Namespaces must have Istio ambient mode enabled." 
+ pattern: + metadata: + labels: + istio.io/dataplane-mode: ambient diff --git a/istio/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml b/istio/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e457fa2b8 --- /dev/null +++ b/istio/enforce-sidecar-injection-namespace/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-sidecar-injection-namespace +policies: +- ../enforce-sidecar-injection-namespace.yaml +resources: +- ../.chainsaw-test/ns-bad-disabled.yaml +- ../.chainsaw-test/ns-bad-nolabel.yaml +- ../.chainsaw-test/ns-bad-somelabel.yaml +- ../.chainsaw-test/ns-good.yaml +results: +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - bad-istio-sinj01 + - bad-istio-sinj02 + - bad-istio-sinj03 + result: fail +- policy: enforce-sidecar-injection-namespace + rule: check-istio-injection-enabled + kind: Namespace + resources: + - good-istio-sinj01 + - good-istio-sinj02 + result: pass + diff --git a/istio/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml b/istio/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..f4018437a --- /dev/null +++ b/istio/enforce-strict-mtls/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,29 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-strict-mtls +policies: +- ../enforce-strict-mtls.yaml +resources: +- ../.chainsaw-test/pa-bad.yaml +- ../.chainsaw-test/pa-good.yaml +results: +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - pa-bad01 + - pa-bad02 + - pa-bad03 + result: fail +- policy: enforce-strict-mtls + rule: validate-mtls + kind: PeerAuthentication + resources: + - good-pa01 + - good-pa02 + - good-pa03 + - good-pa04 + - good-pa05 + result: pass + diff --git 
a/istio/enforce-tls-hosts-host-subnets/.kyverno-test/kyverno-test.yaml b/istio/enforce-tls-hosts-host-subnets/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..8b2e1cf9b --- /dev/null +++ b/istio/enforce-tls-hosts-host-subnets/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: enforce-tls-hosts-host-subnets +policies: +- ../enforce-tls-hosts-host-subnets.yaml +resources: +- ../.chainsaw-test/dr-bad.yaml +- ../.chainsaw-test/dr-good.yaml +results: +- policy: enforce-tls-hosts-host-subnets + rule: destrule + kind: DestinationRule + resources: + - bad-dr01 + - bad-dr02 + result: fail +- policy: enforce-tls-hosts-host-subnets + rule: destrule + kind: DestinationRule + resources: + - good-dr01 + - good-dr02 + - good-dr03 + - good-dr04 + - good-dr05 + result: pass + diff --git a/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml b/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml index d68283137..3f2e6b492 100644 --- a/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml +++ b/istio/prevent-disabling-injection-pods/.chainsaw-test/pod-bad.yaml @@ -19,8 +19,8 @@ metadata: foo: bar annotations: sidecar.istio.io/inject: "false" - app.k8s.io/name: badpod01 - name: badpod01 + app.k8s.io/name: badpod02 + name: badpod02 spec: containers: - name: busybox diff --git a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml index ad858a2d7..90c90ffc0 100644 --- a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml +++ b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-bad.yaml @@ -53,16 +53,17 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: app.k8s.io/name: busybox sidecar.istio.io/inject: "false" + spec: containers: - name: hello image: busybox:1.35 command: - sleep - - 3600 + - "3600" 
restartPolicy: OnFailure --- apiVersion: batch/v1 @@ -74,14 +75,15 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: sidecar.istio.io/inject: "false" app.k8s.io/name: busybox + spec: containers: - name: hello image: busybox:1.35 - command: + command: - sleep - - 3600 + - "3600" restartPolicy: OnFailure \ No newline at end of file diff --git a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml index 387a650ab..59d1afa1b 100644 --- a/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml +++ b/istio/prevent-disabling-injection-pods/.chainsaw-test/podcontroller-good.yaml @@ -52,9 +52,10 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: app.k8s.io/name: busybox + spec: containers: - name: hello image: busybox:1.35 @@ -72,10 +73,11 @@ spec: jobTemplate: spec: template: - spec: + metadata: annotations: app.k8s.io/name: busybox sidecar.istio.io/inject: "true" + spec: containers: - name: hello image: busybox:1.35 diff --git a/istio/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml b/istio/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..5aa5be9ad --- /dev/null +++ b/istio/prevent-disabling-injection-pods/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,55 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-disabling-injection-pods +policies: +- ../prevent-disabling-injection-pods.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - badpod01 + - badpod02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Deployment + resources: + - 
baddeploy01 + - baddeploy02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - badcronjob01 + - badcronjob02 + result: fail +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: prevent-disabling-injection-pods + rule: prohibit-inject-annotation + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass + diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/chainsaw-test.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..81d089924 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,60 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: k10-data-protection-by-label +spec: + steps: + - name: step-01 + try: + - apply: + file: ../k10-data-protection-by-label.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: k10-data-protection-by-label + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ns.yaml + - apply: + file: deployment-good.yaml + - apply: + file: ss-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: deployment-bad-badlabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: deployment-bad-nolabel.yaml + - apply: + expect: + - check: + ($error != null): true + file: ss-bad-badlabel.yaml + - apply: + 
expect: + - check: + ($error != null): true + file: ss-bad-nolabel.yaml + - name: step-98 + try: + - script: + content: kubectl delete deployments --all --force --grace-period=0 -n k10-dplabel-ns + - script: + content: kubectl delete statefulsets --all --force --grace-period=0 -n k10-dplabel-ns + - script: + content: kubectl delete pods --all --force --grace-period=0 -n k10-dplabel-ns diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-01-assert-1.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/crd-assert.yaml similarity index 100% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-01-assert-1.yaml rename to kasten-cel/k10-data-protection-by-label/.chainsaw-test/crd-assert.yaml diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-6.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-badlabel.yaml old mode 100755 new mode 100644 similarity index 65% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-6.yaml rename to kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-badlabel.yaml index 4ba469633..040ccdb48 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-6.yaml +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-badlabel.yaml @@ -1,12 +1,11 @@ apiVersion: apps/v1 kind: Deployment metadata: + name: baddeploy02 labels: app: busybox - dataprotection: k10-goldpolicy purpose: production - name: deploy01 - namespace: k10-gp-ns02 + dataprotection: foo-bar spec: replicas: 1 selector: @@ -18,8 +17,8 @@ spec: app: busybox spec: containers: - - command: - - sleep - - "3600" + - name: busybox image: busybox:1.35 - name: busybox + command: + - "sleep" + - "3600" \ No newline at end of file diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-8.yaml 
b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-nolabel.yaml old mode 100755 new mode 100644 similarity index 65% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-8.yaml rename to kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-nolabel.yaml index 716709323..c34fd4785 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-8.yaml +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-bad-nolabel.yaml @@ -1,12 +1,10 @@ apiVersion: apps/v1 kind: Deployment metadata: + name: baddeploy01 labels: app: busybox - dataprotection: k10-simplepolicy purpose: production - name: deploy02 - namespace: k10-gp-ns04 spec: replicas: 1 selector: @@ -18,8 +16,8 @@ spec: app: busybox spec: containers: - - command: - - sleep - - "3600" + - name: busybox image: busybox:1.35 - name: busybox + command: + - "sleep" + - "3600" \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-good.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-good.yaml new file mode 100644 index 000000000..dcf3c489e --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/deployment-good.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy01 + namespace: k10-dplabel-ns + labels: + app: busybox + purpose: production + dataprotection: k10-goldpolicy +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy02 + namespace: k10-dplabel-ns + labels: + app: busybox + purpose: development + dataprotection: foo-bar +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - 
name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy03 + namespace: k10-dplabel-ns + labels: + app: busybox +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: + - "sleep" + - "3600" \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/nginx-deployment-invalid.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/nginx-deployment-invalid.yaml new file mode 100644 index 000000000..566318b81 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/nginx-deployment-invalid.yaml @@ -0,0 +1,31 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: nginx + labels: + name: nginx +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: nginx + labels: + app: nginx + purpose: production + dataprotection: none # invalid named K10 Policy!! 
+spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-3.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ns.yaml old mode 100755 new mode 100644 similarity index 65% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-3.yaml rename to kasten-cel/k10-data-protection-by-label/.chainsaw-test/ns.yaml index b6924f910..00e9c20e7 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-3.yaml +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ns.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: k10-gp-ns03 + name: k10-dplabel-ns \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/policy-ready.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..4e8dfe8c2 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-data-protection-by-label +status: + ready: true diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-badlabel.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-badlabel.yaml new file mode 100644 index 000000000..cf1a15841 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-badlabel.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: bad-ss02 + labels: + purpose: production + dataprotection: foo-bar +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + minReadySeconds: 10 + template: + metadata: + labels: + app: busybox + spec: + terminationGracePeriodSeconds: 10 + 
containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-5.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-nolabel.yaml old mode 100755 new mode 100644 similarity index 67% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-5.yaml rename to kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-nolabel.yaml index 48123e7c6..397a81231 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-5.yaml +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-bad-nolabel.yaml @@ -1,22 +1,22 @@ apiVersion: apps/v1 kind: StatefulSet metadata: + name: bad-ss01 labels: - dataprotection: k10-goldpolicy purpose: production - name: ss01 - namespace: k10-gp-ns01 spec: - replicas: 1 selector: matchLabels: app: busybox serviceName: busybox-ss + replicas: 1 + minReadySeconds: 10 template: metadata: labels: app: busybox spec: + terminationGracePeriodSeconds: 10 containers: - - image: busybox:1.35 - name: busybox + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-good.yaml b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-good.yaml new file mode 100644 index 000000000..bc6216c38 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.chainsaw-test/ss-good.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: good-ss01 + namespace: k10-dplabel-ns + labels: + purpose: production + dataprotection: k10-silverpolicy +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: good-ss02 + namespace: k10-dplabel-ns +spec: + selector: + matchLabels: + app: 
busybox + serviceName: busybox-ss + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: good-ss03 + namespace: k10-dplabel-ns + labels: + purpose: development + dataprotection: foo-bar +spec: + selector: + matchLabels: + app: busybox + serviceName: busybox-ss + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kasten-cel/k10-data-protection-by-label/.kyverno-test/kyverno-test.yaml b/kasten-cel/k10-data-protection-by-label/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..58f925a36 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: kyverno_data_protection_tests +policies: +- ../k10-data-protection-by-label.yaml +resources: +- nginx-deployment.yaml +results: +- kind: Deployment + policy: k10-data-protection-by-label + resources: + - nginx-deployment-invalid + result: fail + rule: k10-data-protection-by-label +- kind: Deployment + policy: k10-data-protection-by-label + resources: + - nginx-deployment + result: pass + rule: k10-data-protection-by-label diff --git a/kasten-cel/k10-data-protection-by-label/.kyverno-test/nginx-deployment.yaml b/kasten-cel/k10-data-protection-by-label/.kyverno-test/nginx-deployment.yaml new file mode 100644 index 000000000..9ceb00a66 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/.kyverno-test/nginx-deployment.yaml @@ -0,0 +1,60 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: nginx + labels: + name: nginx + purpose: production + dataprotection: k10-goldpolicy + immutable: enabled +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + namespace: nginx + labels: + app: nginx + purpose: production + 
dataprotection: k10-goldpolicy # set a policy to use our 'gold' standard data protection policy (generate-gold-backup-policy) + immutable: enabled +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment-invalid + namespace: nginx + labels: + app: nginx + purpose: production + dataprotection: none # invalid named K10 Policy!! +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/kasten-cel/k10-data-protection-by-label/artifacthub-pkg.yml b/kasten-cel/k10-data-protection-by-label/artifacthub-pkg.yml new file mode 100644 index 000000000..2ca7fa978 --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: k10-data-protection-by-label-cel +version: 1.0.0 +displayName: Check Data Protection By Label in CEL expressions +description: >- + Check the 'dataprotection' label that production Deployments and StatefulSet have a named K10 Policy. Use in combination with 'generate' ClusterPolicy to 'generate' a specific K10 Policy by name. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml + ``` +keywords: + - kyverno + - Kasten K10 by Veeam + - CEL Expressions +readme: | + Check the 'dataprotection' label that production Deployments and StatefulSet have a named K10 Policy. Use in combination with 'generate' ClusterPolicy to 'generate' a specific K10 Policy by name. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kasten K10 by Veeam in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Deployment, StatefulSet" +digest: e3a088a52aac74e16f9b2776df78891344edd6dc03ee6456dc71d71c34519325 +createdAt: "2024-05-12T07:05:48Z" + diff --git a/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml b/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml new file mode 100644 index 000000000..58270490c --- /dev/null +++ b/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml @@ -0,0 +1,36 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-data-protection-by-label + annotations: + policies.kyverno.io/title: Check Data Protection By Label in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Deployment, StatefulSet + policies.kyverno.io/description: >- + Check the 'dataprotection' label that production Deployments and StatefulSet have a named K10 Policy. + Use in combination with 'generate' ClusterPolicy to 'generate' a specific K10 Policy by name. 
+spec: + validationFailureAction: Audit + rules: + - name: k10-data-protection-by-label + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + operations: + - CREATE + - UPDATE + selector: + matchLabels: + purpose: production + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && has(object.metadata.labels.dataprotection) && object.metadata.labels.dataprotection.startsWith('k10-')" + message: "Deployments and StatefulSets that specify 'dataprotection' label must have a valid k10-?* name (use labels: dataprotection: k10-)" + diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/chainsaw-test.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..e02442dae --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: k10-hourly-rpo +spec: + steps: + - name: step-01 + try: + - apply: + file: ../k10-hourly-rpo.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: k10-policy-hourly-rpo + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: k10-good-policy.yaml + - apply: + expect: + - check: + ($error != null): true + file: k10-bad-policy.yaml diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/crd-assert.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..d660e00cb --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: policies.config.kio.kasten.io +spec: {} +status: + acceptedNames: + kind: 
Policy + listKind: PolicyList + plural: policies + singular: policy + storedVersions: + - v1alpha1 diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-bad-policy.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-bad-policy.yaml new file mode 100644 index 000000000..1bc2dd2d6 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-bad-policy.yaml @@ -0,0 +1,34 @@ +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: k10-hourlyrpo-badpolicy01 + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@daily' + subFrequency: + minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp \ No newline at end of file diff --git a/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-good-policy.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-good-policy.yaml new file mode 100644 index 000000000..8acf13284 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/k10-good-policy.yaml @@ -0,0 +1,34 @@ +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: k10-hourlyrpo-goodpolicy01 + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@hourly' + subFrequency: + minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp \ No newline at end of file diff --git 
a/kasten-cel/k10-hourly-rpo/.chainsaw-test/policy-ready.yaml b/kasten-cel/k10-hourly-rpo/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..3fa1c7221 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-policy-hourly-rpo +status: + ready: true diff --git a/kasten-cel/k10-hourly-rpo/.kyverno-test/backup-export-policy.yaml b/kasten-cel/k10-hourly-rpo/.kyverno-test/backup-export-policy.yaml new file mode 100644 index 000000000..96430f81a --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/.kyverno-test/backup-export-policy.yaml @@ -0,0 +1,73 @@ +# An example compliant K10 Policy +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: hourly-policy + namespace: kasten-io + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@hourly' # change this to @daily to test the 'audit_mission_critical_RPO' policy + subFrequency: + minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export # comment this line out to test 'enforce_3-2-1' policy + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp +--- +# An example compliant K10 Policy +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: daily-policy + namespace: kasten-io + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@daily' # change this to @daily to test the 'audit_mission_critical_RPO' policy + subFrequency: + minutes: [30] + hours: [22,7] + weekdays: [5] + days: [15] + retention: + daily: 14 + weekly: 4 + monthly: 6 + actions: + - action: backup + - action: export # comment 
this line out to test 'enforce_3-2-1' policy + exportParameters: + frequency: '@monthly' + profile: + name: my-profile + namespace: kasten-io + exportData: + enabled: true + retention: + monthly: 12 + yearly: 5 + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp diff --git a/kasten/k10-minimum-retention/.kyverno-test/kyverno-test.yaml b/kasten-cel/k10-hourly-rpo/.kyverno-test/kyverno-test.yaml similarity index 50% rename from kasten/k10-minimum-retention/.kyverno-test/kyverno-test.yaml rename to kasten-cel/k10-hourly-rpo/.kyverno-test/kyverno-test.yaml index c6b85cb42..965e355f5 100644 --- a/kasten/k10-minimum-retention/.kyverno-test/kyverno-test.yaml +++ b/kasten-cel/k10-hourly-rpo/.kyverno-test/kyverno-test.yaml @@ -3,14 +3,19 @@ kind: Test metadata: name: kyverno_data_protection_tests policies: -- ../k10-minimum-retention.yaml +- ../k10-hourly-rpo.yaml resources: - backup-export-policy.yaml results: - kind: Policy - patchedResource: patched.yaml - policy: k10-minimum-retention + policy: k10-policy-hourly-rpo + resources: + - daily-policy + result: fail + rule: k10-policy-hourly-rpo +- kind: Policy + policy: k10-policy-hourly-rpo resources: - hourly-policy result: pass - rule: k10-minimum-retention + rule: k10-policy-hourly-rpo diff --git a/kasten-cel/k10-hourly-rpo/artifacthub-pkg.yml b/kasten-cel/k10-hourly-rpo/artifacthub-pkg.yml new file mode 100644 index 000000000..bffb28b92 --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: k10-hourly-rpo-cel +version: 1.0.0 +displayName: Check Hourly RPO in CEL expressions +description: >- + K10 Policy resources can be educated to adhere to common Recovery Point Objective (RPO) best practices. 
This policy is advising to use an RPO frequency that with hourly granularity if it has the appPriority: Mission Critical +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml + ``` +keywords: + - kyverno + - Kasten K10 by Veeam + - CEL Expressions +readme: | + K10 Policy resources can be educated to adhere to common Recovery Point Objective (RPO) best practices. This policy is advising to use an RPO frequency that with hourly granularity if it has the appPriority: Mission Critical + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kasten K10 by Veeam in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Policy" +digest: 4c95862e422636b4900103e9620ed8e41d4cafd86984a1d22e81f35767bb0eef +createdAt: "2024-05-12T07:07:31Z" + diff --git a/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml b/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml new file mode 100644 index 000000000..d5f62904f --- /dev/null +++ b/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml @@ -0,0 +1,35 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-policy-hourly-rpo + annotations: + policies.kyverno.io/title: Check Hourly RPO in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Policy + policies.kyverno.io/description: >- + K10 Policy resources can be educated to adhere to common Recovery Point Objective (RPO) best practices. 
+ This policy is advising to use an RPO frequency that with hourly granularity if it has the appPriority: Mission Critical +spec: + validationFailureAction: Audit + rules: + - name: k10-policy-hourly-rpo + match: + any: + - resources: + kinds: + - config.kio.kasten.io/v1alpha1/Policy + operations: + - CREATE + - UPDATE + selector: + matchLabels: + appPriority: Mission-Critical + validate: + cel: + expressions: + - expression: "has(object.spec.frequency) && object.spec.frequency == '@hourly'" + message: "Mission Critical RPO frequency should use no shorter than @hourly frequency" + diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/chainsaw-test.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..8893c6007 --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,33 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: k10-validate-ns-by-preset-label +spec: + steps: + - name: step-01 + try: + - apply: + file: ../k10-validate-ns-by-preset-label.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: k10-validate-ns-by-preset-label + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - assert: + file: crd-assert.yaml + - name: step-02 + try: + - apply: + file: ns-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ns-bad.yaml diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/crd-assert.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..d660e00cb --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,13 @@ +apiVersion: 
apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: policies.config.kio.kasten.io +spec: {} +status: + acceptedNames: + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + storedVersions: + - v1alpha1 diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-bad.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-bad.yaml new file mode 100644 index 000000000..baf81215b --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-bad.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-badns01 + labels: + dataprotection: foo-bar +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-badns02 \ No newline at end of file diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-good.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-good.yaml new file mode 100644 index 000000000..f9fcdc245 --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/ns-good.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns01 + labels: + dataprotection: gold +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns02 + labels: + dataprotection: silver +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns03 + labels: + dataprotection: bronze +--- +apiVersion: v1 +kind: Namespace +metadata: + name: k10-validatens-goodns04 + labels: + dataprotection: none \ No newline at end of file diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/policy-ready.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..27e86f51b --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-validate-ns-by-preset-label 
+status: + ready: true diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/kyverno-test.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..440c95426 --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,24 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: k10-validate-ns-by-preset-label-test +policies: +- ../k10-validate-ns-by-preset-label.yaml +resources: +- test-resource.yaml +results: +- kind: Namespace + policy: k10-validate-ns-by-preset-label + resources: + - namespace-invalid + result: fail + rule: k10-validate-ns-by-preset-label +- kind: Namespace + policy: k10-validate-ns-by-preset-label + resources: + - namespace-gold + - namespace-silver + - namespace-bronze + - namespace-none + result: pass + rule: k10-validate-ns-by-preset-label diff --git a/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/test-resource.yaml b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/test-resource.yaml new file mode 100644 index 000000000..f5dc36f2b --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/.kyverno-test/test-resource.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-gold + labels: + dataprotection: gold +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-silver + labels: + dataprotection: silver +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-bronze + labels: + dataprotection: bronze +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-none + labels: + dataprotection: none +--- +apiVersion: v1 +kind: Namespace +metadata: + name: namespace-invalid \ No newline at end of file diff --git a/kasten-cel/k10-validate-ns-by-preset-label/artifacthub-pkg.yml b/kasten-cel/k10-validate-ns-by-preset-label/artifacthub-pkg.yml new file mode 100644 index 000000000..c1ec63ef0 --- /dev/null +++ 
b/kasten-cel/k10-validate-ns-by-preset-label/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: k10-validate-ns-by-preset-label-cel +version: 1.0.0 +displayName: Validate Data Protection by Preset Label in CEL expressions +description: >- + Kubernetes applications are typically deployed into a single, logical namespace. Kasten K10 policies will discover and protect all resources within the selected namespace(s). This policy ensures all new namespaces include a label referencing a valid K10 SLA (Policy Preset) for data protection. This policy can be used in combination with generate ClusterPolicy to automatically create a K10 policy based on the specified SLA. The combination ensures that new applications are not inadvertently left unprotected. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml + ``` +keywords: + - kyverno + - Kasten K10 by Veeam + - CEL Expressions +readme: | + Kubernetes applications are typically deployed into a single, logical namespace. Kasten K10 policies will discover and protect all resources within the selected namespace(s). This policy ensures all new namespaces include a label referencing a valid K10 SLA (Policy Preset) for data protection. This policy can be used in combination with generate ClusterPolicy to automatically create a K10 policy based on the specified SLA. The combination ensures that new applications are not inadvertently left unprotected. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kasten K10 by Veeam in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace" +digest: e58ab4c2018542a6acd5e97446b09cf04cec26425b9a29f0207c518310c449f3 +createdAt: "2024-05-12T07:09:08Z" + diff --git a/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml b/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml new file mode 100644 index 000000000..4668e742a --- /dev/null +++ b/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml @@ -0,0 +1,42 @@ +#NOTE: This example assumes that K10 policy presets named "gold", "silver", and "bronze" have been pre-created and K10 was deployed into the `kasten-io` namespace. +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-validate-ns-by-preset-label + annotations: + policies.kyverno.io/title: Validate Data Protection by Preset Label in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + policies.kyverno.io/subject: Namespace + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Kubernetes applications are typically deployed into a single, logical namespace. + Kasten K10 policies will discover and protect all resources within the selected namespace(s). + This policy ensures all new namespaces include a label referencing a valid K10 SLA + (Policy Preset) for data protection.This policy can be used in combination with generate + ClusterPolicy to automatically create a K10 policy based on the specified SLA. + The combination ensures that new applications are not inadvertently left unprotected. 
+spec: + validationFailureAction: Audit + rules: + - name: k10-validate-ns-by-preset-label + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && has(object.metadata.labels.dataprotection) && object.metadata.labels.dataprotection in ['gold', 'silver', 'bronze', 'none']" + message: >- + Namespaces must specify a "dataprotection" label with a value corresponding to a Kasten K10 SLA: + + "gold" - + "silver" - + "bronze" - + "none" - No local snapshots or backups diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-test.yaml b/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-test.yaml deleted file mode 100755 index b9680a0fb..000000000 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-test.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json -apiVersion: chainsaw.kyverno.io/v1alpha1 -kind: Test -metadata: - creationTimestamp: null - name: k10-generate-gold-backup-policy -spec: - steps: - - name: step-01 - try: - - apply: - file: chainsaw-step-01-apply-1.yaml - - assert: - file: chainsaw-step-01-assert-1.yaml - - name: step-02 - try: - - apply: - file: ../k10-generate-gold-backup-policy.yaml - - assert: - file: policy-ready.yaml - - name: step-03 - try: - - apply: - file: chainsaw-step-03-apply-1.yaml - - apply: - file: chainsaw-step-03-apply-2.yaml - - apply: - file: chainsaw-step-03-apply-3.yaml - - apply: - file: chainsaw-step-03-apply-4.yaml - - apply: - file: chainsaw-step-03-apply-5.yaml - - apply: - file: chainsaw-step-03-apply-6.yaml - - apply: - file: chainsaw-step-03-apply-7.yaml - - apply: - file: chainsaw-step-03-apply-8.yaml - - name: step-04 - try: - - assert: - file: generated-policy.yaml - - error: - file: not-generated-policy.yaml - - name: step-05 - try: - - script: - 
content: kubectl delete all --all --force --grace-period=0 -n k10-gp-ns01 - - script: - content: kubectl delete all --all --force --grace-period=0 -n k10-gp-ns02 - - script: - content: kubectl delete all --all --force --grace-period=0 -n k10-gp-ns03 - - script: - content: kubectl delete all --all --force --grace-period=0 -n k10-gp-ns04 diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/generated-policy.yaml b/kasten/k10-generate-gold-backup-policy/.chainsaw-test/generated-policy.yaml deleted file mode 100644 index c6117fc62..000000000 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/generated-policy.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: config.kio.kasten.io/v1alpha1 -kind: Policy -metadata: - name: k10-k10-gp-ns01-gold-backup-policy - namespace: k10-gp-ns01 -spec: - actions: - - action: backup - - action: export - exportParameters: - exportData: - enabled: true - frequency: '@monthly' - profile: - name: object-lock-s3 - namespace: kasten-io - retention: - monthly: 12 - yearly: 5 - comment: K10 "gold" immutable production backup policy - frequency: '@daily' - retention: - daily: 7 - monthly: 12 - weekly: 4 - yearly: 7 - selector: - matchExpressions: - - key: k10.kasten.io/appNamespace - operator: In - values: - - k10-gp-ns01 ---- -apiVersion: config.kio.kasten.io/v1alpha1 -kind: Policy -metadata: - name: k10-k10-gp-ns02-gold-backup-policy - namespace: k10-gp-ns02 -spec: - actions: - - action: backup - - action: export - exportParameters: - exportData: - enabled: true - frequency: '@monthly' - profile: - name: object-lock-s3 - namespace: kasten-io - retention: - monthly: 12 - yearly: 5 - comment: K10 "gold" immutable production backup policy - frequency: '@daily' - retention: - daily: 7 - monthly: 12 - weekly: 4 - yearly: 7 - selector: - matchExpressions: - - key: k10.kasten.io/appNamespace - operator: In - values: - - k10-gp-ns02 \ No newline at end of file diff --git 
a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/not-generated-policy.yaml b/kasten/k10-generate-gold-backup-policy/.chainsaw-test/not-generated-policy.yaml deleted file mode 100644 index 8077a9283..000000000 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/not-generated-policy.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: config.kio.kasten.io/v1alpha1 -kind: Policy -metadata: - name: k10-k10-gp-ns03-gold-backup-policy - namespace: k10-gp-ns03 -spec: - actions: - - action: backup - - action: export - exportParameters: - exportData: - enabled: true - frequency: '@monthly' - profile: - name: object-lock-s3 - namespace: kasten-io - retention: - monthly: 12 - yearly: 5 - comment: K10 "gold" immutable production backup policy - frequency: '@daily' - retention: - daily: 7 - monthly: 12 - weekly: 4 - yearly: 7 - selector: - matchExpressions: - - key: k10.kasten.io/appNamespace - operator: In - values: - - k10-gp-ns03 ---- -apiVersion: config.kio.kasten.io/v1alpha1 -kind: Policy -metadata: - name: k10-k10-gp-ns04-gold-backup-policy - namespace: k10-gp-ns04 -spec: - actions: - - action: backup - - action: export - exportParameters: - exportData: - enabled: true - frequency: '@monthly' - profile: - name: object-lock-s3 - namespace: kasten-io - retention: - monthly: 12 - yearly: 5 - comment: K10 "gold" immutable production backup policy - frequency: '@daily' - retention: - daily: 7 - monthly: 12 - weekly: 4 - yearly: 7 - selector: - matchExpressions: - - key: k10.kasten.io/appNamespace - operator: In - values: - - k10-gp-ns04 diff --git a/kasten/k10-generate-gold-backup-policy/artifacthub-pkg.yml b/kasten/k10-generate-gold-backup-policy/artifacthub-pkg.yml deleted file mode 100644 index 9e671e537..000000000 --- a/kasten/k10-generate-gold-backup-policy/artifacthub-pkg.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: k10-generate-gold-backup-policy -version: 1.0.0 -displayName: Generate Gold Backup Policy -createdAt: "2023-04-10T20:12:53.000Z" -description: >- - 
Generate a backup policy for any Deployment or StatefulSet that adds the labels "dataprotection: k10-goldpolicy" This policy works best to decide the data protection objectives and simply assign backup via application labels. -install: |- - ```shell - kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.yaml - ``` -keywords: - - kyverno - - Kasten K10 by Veeam -readme: | - Generate a backup policy for any Deployment or StatefulSet that adds the labels "dataprotection: k10-goldpolicy" This policy works best to decide the data protection objectives and simply assign backup via application labels. - - Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ -annotations: - kyverno/category: "Kasten K10 by Veeam" - kyverno/kubernetesVersion: "1.21-1.22" - kyverno/subject: "Policy" -digest: 9c12e7c601640434411e08b965b408cebd9862cb23760cac545a2a96741036b7 diff --git a/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.yaml b/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.yaml deleted file mode 100644 index f79d0b637..000000000 --- a/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.yaml +++ /dev/null @@ -1,63 +0,0 @@ -apiVersion: kyverno.io/v1 -kind: ClusterPolicy -metadata: - name: k10-generate-gold-backup-policy - annotations: - policies.kyverno.io/title: Generate Gold Backup Policy - policies.kyverno.io/category: Kasten K10 by Veeam - kyverno.io/kyverno-version: 1.6.2 - policies.kyverno.io/minversion: 1.6.2 - kyverno.io/kubernetes-version: "1.21-1.22" - policies.kyverno.io/subject: Policy - policies.kyverno.io/description: >- - Generate a backup policy for any Deployment or StatefulSet that adds the labels "dataprotection: k10-goldpolicy" - This policy works best to decide the data protection objectives and simply assign backup via application 
labels. -spec: - background: false - rules: - - name: k10-generate-gold-backup-policy - match: - any: - - resources: - kinds: - - Deployment - - StatefulSet - selector: - matchLabels: - dataprotection: k10-goldpolicy # match with a corresponding ClusterPolicy that checks for this label - generate: - apiVersion: config.kio.kasten.io/v1alpha1 - kind: Policy - name: k10-{{request.namespace}}-gold-backup-policy - namespace: "{{request.namespace}}" - data: - metadata: - name: k10-{{request.namespace}}-gold-backup-policy - namespace: "{{request.namespace}}" - spec: - comment: K10 "gold" immutable production backup policy - frequency: '@daily' - retention: - daily: 7 - weekly: 4 - monthly: 12 - yearly: 7 - actions: - - action: backup - - action: export - exportParameters: - frequency: '@monthly' - profile: - name: object-lock-s3 - namespace: kasten-io - exportData: - enabled: true - retention: - monthly: 12 - yearly: 5 - selector: - matchExpressions: - - key: k10.kasten.io/appNamespace - operator: In - values: - - "{{request.namespace}}" diff --git a/kasten/k10-minimum-retention/artifacthub-pkg.yml b/kasten/k10-minimum-retention/artifacthub-pkg.yml deleted file mode 100644 index de205e415..000000000 --- a/kasten/k10-minimum-retention/artifacthub-pkg.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: k10-minimum-retention -version: 1.0.0 -displayName: Minimum Backup Retention -createdAt: "2023-04-10T20:12:53.000Z" -description: >- - K10 Policy resources can be validated to adhere to common compliance retention standards. Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. This policy deletes the retention value in the backup operation and replaces it with the specified retention. Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. To use different This policy can also be used go reduce retentions lengths to enforce cost optimization. 
-install: |- - ```shell - kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten/k10-minimum-retention/k10-minimum-retention.yaml - ``` -keywords: - - kyverno - - Kasten K10 by Veeam -readme: | - K10 Policy resources can be validated to adhere to common compliance retention standards. Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. This policy deletes the retention value in the backup operation and replaces it with the specified retention. Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. To use different This policy can also be used go reduce retentions lengths to enforce cost optimization. - - Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ -annotations: - kyverno/category: "Kasten K10 by Veeam" - kyverno/kubernetesVersion: "1.21-1.22" - kyverno/subject: "Policy" -digest: f7d09195f6c8982f0075c866b0480626a3fbf4fd352130ae0a1be86abb79c2b7 diff --git a/kasten/kasten-generate-example-backup-policy/.kyverno-test/generatedResource.yaml b/kasten/kasten-generate-example-backup-policy/.kyverno-test/generatedResource.yaml new file mode 100644 index 000000000..650b634e5 --- /dev/null +++ b/kasten/kasten-generate-example-backup-policy/.kyverno-test/generatedResource.yaml @@ -0,0 +1,29 @@ +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: test-namespace-kasten-example-policy + namespace: kasten-io +spec: + comment: "Auto-generated by Kyverno" + frequency: '@daily' + retention: + daily: 7 + weekly: 4 + monthly: 12 + yearly: 7 + actions: + - action: backup + - action: export + exportParameters: + frequency: '@daily' + profile: + name: test + namespace: kasten-io + exportData: + enabled: true + selector: + matchExpressions: + - key: k10.kasten.io/appNamespace + operator: In + values: + - test-namespace \ No newline at end of file diff 
--git a/kasten/kasten-generate-example-backup-policy/.kyverno-test/kyverno-test.yaml b/kasten/kasten-generate-example-backup-policy/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..9d1cde8a9 --- /dev/null +++ b/kasten/kasten-generate-example-backup-policy/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,17 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: kasten-generate-example-backup-policy-test +policies: +- ../kasten-generate-example-backup-policy.yaml +resources: +- test-resource.yaml +results: +- generatedResource: generatedResource.yaml + kind: Deployment + policy: kasten-generate-example-backup-policy + resources: + - test-deployment + result: pass + rule: kasten-generate-example-backup-policy +variables: test-values.yaml diff --git a/kasten/kasten-generate-example-backup-policy/.kyverno-test/test-resource.yaml b/kasten/kasten-generate-example-backup-policy/.kyverno-test/test-resource.yaml new file mode 100644 index 000000000..d25ce5dda --- /dev/null +++ b/kasten/kasten-generate-example-backup-policy/.kyverno-test/test-resource.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment + namespace: test-namespace + labels: + app: nginx + dataprotection: kasten-example +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 \ No newline at end of file diff --git a/kasten/kasten-generate-example-backup-policy/.kyverno-test/test-values.yaml b/kasten/kasten-generate-example-backup-policy/.kyverno-test/test-values.yaml new file mode 100644 index 000000000..2de482915 --- /dev/null +++ b/kasten/kasten-generate-example-backup-policy/.kyverno-test/test-values.yaml @@ -0,0 +1,13 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Values +policies: +- name: kasten-generate-example-backup-policy + resources: + - name: test-variables + values: + 
request.namespace: test-namespace + dataprotectionLabelValue: kasten-example + rules: + - name: kasten-generate-example-backup-policy + values: + existingPolicy: 0 diff --git a/kasten/kasten-generate-example-backup-policy/artifacthub-pkg.yml b/kasten/kasten-generate-example-backup-policy/artifacthub-pkg.yml new file mode 100644 index 000000000..e6b52eefd --- /dev/null +++ b/kasten/kasten-generate-example-backup-policy/artifacthub-pkg.yml @@ -0,0 +1,20 @@ +name: kasten-generate-example-backup-policy +version: 1.0.1 +displayName: Generate Kasten Backup Policy Based on Resource Label +createdAt: "2023-05-07T00:00:00.000Z" +description: >- + Generates a Kasten policy for a namespace that includes any Deployment or StatefulSet with a "dataprotection=kasten-example" label, if the policy does not already exist. This Kyverno policy can be used in combination with the "kasten-data-protection-by-label" policy to require "dataprotection" labeling on workloads. NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. An example ClusterRole to provide required privileges is provided within the comments of the policy manifest. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.yaml + ``` +keywords: + - kyverno + - Veeam Kasten +readme: | + Generates a Kasten policy for a namespace that includes any Deployment or StatefulSet with a "dataprotection=kasten-example" label, if the policy does not already exist. This Kyverno policy can be used in combination with the "kasten-data-protection-by-label" policy to require "dataprotection" labeling on workloads. NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. 
An example ClusterRole to provide required privileges is provided within the comments of the policy manifest. +annotations: + kyverno/category: "Veeam Kasten" + kyverno/kubernetesVersion: "1.24-1.30" + kyverno/subject: "Policy" +digest: 74edc3942670ec20e8b9ab00db894e503071bcc4c2da12dca2a6e03a2b2f562a diff --git a/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.yaml b/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.yaml new file mode 100644 index 000000000..995ed99f9 --- /dev/null +++ b/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.yaml @@ -0,0 +1,97 @@ +# This is an example rule intended to be cloned & modified to meet organizational requirements. +# The `dataprotection` label value can be changed to correspond with specific policy templates. +# +# NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. An example ClusterRole to provide required privileges is provided within the comments of the policy manifest. 
+# +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: ClusterRole +# metadata: +# labels: +# app.kubernetes.io/component: background-controller +# app.kubernetes.io/instance: kyverno +# app.kubernetes.io/part-of: kyverno +# name: kyverno:create-kasten-policies +# rules: +# - apiGroups: +# - config.kio.kasten.io +# resources: +# - policies +# verbs: +# - create +# - update +# - delete +# +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: kasten-generate-example-backup-policy + annotations: + policies.kyverno.io/title: Generate Kasten Backup Policy Based on Resource Label + policies.kyverno.io/category: Veeam Kasten + kyverno.io/kyverno-version: 1.12.1 + policies.kyverno.io/minversion: 1.12.0 + kyverno.io/kubernetes-version: "1.24-1.30" + policies.kyverno.io/subject: Policy + policies.kyverno.io/description: >- + Generates a Kasten policy for a namespace that includes any Deployment or StatefulSet with a "dataprotection=kasten-example" label, if the policy does not already exist. This Kyverno policy can be used in combination with the "kasten-data-protection-by-label" policy to require "dataprotection" labeling on workloads. 
+spec: + rules: + - name: kasten-generate-example-backup-policy + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + selector: + matchLabels: + dataprotection: kasten-example + context: + - name: dataprotectionLabelValue + variable: + value: "kasten-example" + - name: kyvernoPolicyName + variable: + value: "kasten-generate-example-backup-policy" + - name: existingPolicy + apiCall: + urlPath: "/apis/config.kio.kasten.io/v1alpha1/namespaces/kasten-io/policies" # returns list of Kasten policies from kasten-io namespace + jmesPath: "items[][[@.metadata.labels.\"generate.kyverno.io/policy-name\"=='{{ kyvernoPolicyName }}'] && [@.spec.selector.matchExpressions[].values[?@=='{{ request.namespace }}']]][][][][] | length(@)" # queries if a Kasten policy protecting the namespace generated by this Kyverno policy already exists + preconditions: + any: + - key: "{{ existingPolicy }}" + operator: Equals + value: 0 # Only generate the policy if it does not already exist + generate: + apiVersion: config.kio.kasten.io/v1alpha1 + kind: Policy + name: "{{ request.namespace }}-{{ dataprotectionLabelValue }}-policy" + namespace: kasten-io + data: + metadata: + name: "{{ request.namespace }}-{{ dataprotectionLabelValue }}-policy" + namespace: kasten-io + spec: + comment: "Auto-generated by Kyverno" + frequency: '@daily' + retention: + daily: 7 + weekly: 4 + monthly: 12 + yearly: 7 + actions: + - action: backup + - action: export + exportParameters: + frequency: '@daily' + profile: + name: test + namespace: kasten-io + exportData: + enabled: true + selector: + matchExpressions: + - key: k10.kasten.io/appNamespace + operator: In + values: + - "{{ request.namespace }}" diff --git a/kasten/kasten-generate-policy-by-preset-label/artifacthub-pkg.yml b/kasten/kasten-generate-policy-by-preset-label/artifacthub-pkg.yml index 43e1aa102..31a33aab8 100644 --- a/kasten/kasten-generate-policy-by-preset-label/artifacthub-pkg.yml +++ 
b/kasten/kasten-generate-policy-by-preset-label/artifacthub-pkg.yml @@ -3,9 +3,7 @@ version: 1.0.1 displayName: Generate Kasten Policy from Preset createdAt: "2023-05-07T00:00:00.000Z" description: >- - Generates a Kasten policy for a new namespace that includes a valid "dataprotection" label, if the policy does not already exist. - - Use with "kasten-validate-ns-by-preset-label" policy to require "dataprotection" labeling on new namespaces. + Generates a Kasten policy for a new namespace that includes a valid "dataprotection" label, if the policy does not already exist. This Kyverno policy can be used in combination with the "kasten-validate-ns-by-preset-label" policy to require "dataprotection" labeling on new namespaces. NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. An example ClusterRole to provide required privileges is provided within the comments of the policy manifest. install: |- ```shell kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.yaml @@ -14,11 +12,9 @@ keywords: - kyverno - Veeam Kasten readme: | - Generates a Kasten policy for a new namespace that includes a valid "dataprotection" label, if the policy does not already exist. - Use with "kasten-validate-ns-by-preset-label" policy to require "dataprotection" labeling on new namespaces. - + Generates a Kasten policy for a new namespace that includes a valid "dataprotection" label, if the policy does not already exist. This Kyverno policy can be used in combination with the "kasten-validate-ns-by-preset-label" policy to require "dataprotection" labeling on new namespaces. NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. 
An example ClusterRole to provide required privileges is provided within the comments of the policy manifest. annotations: kyverno/category: "Veeam Kasten" kyverno/kubernetesVersion: "1.24-1.30" kyverno/subject: "Policy" -digest: bd6c752cc28abd28792b579956bdddc69864ab0ffae4dd95b3d47de6977b0aae +digest: cddabf7614a6122728cf0f862013266ddb5731eb45fcaa41d6cb243e9881aad7 diff --git a/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.yaml b/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.yaml index 8d8da97b6..f7aabe6e3 100644 --- a/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.yaml +++ b/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.yaml @@ -1,11 +1,25 @@ -# This example assumes that Kasten policy presets named -# "gold", "silver", and "bronze" have been pre-created -# and Kasten was deployed into the `kasten-io` namespace. +# This example assumes that Kasten policy presets named "gold", "silver", and "bronze" have been pre-created and Kasten was deployed into the `kasten-io` namespace. +# +# NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. An example ClusterRole to provide required privileges is provided within the comments of the policy manifest. +# +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: ClusterRole +# metadata: +# labels: +# app.kubernetes.io/component: background-controller +# app.kubernetes.io/instance: kyverno +# app.kubernetes.io/part-of: kyverno +# name: kyverno:create-kasten-policies +# rules: +# - apiGroups: +# - config.kio.kasten.io +# resources: +# - policies +# verbs: +# - create +# - update +# - delete # -# Additionally, the Kyverno background controller requires -# additional permissions to create Kasten Policy resources. 
-# Apply the create-kasten-policies-clusterrole.yaml manifest -# first to grant the required permissions. apiVersion: kyverno.io/v1 kind: ClusterPolicy metadata: diff --git a/kasten/k10-minimum-retention/.chainsaw-test/README.md b/kasten/kasten-minimum-retention/.chainsaw-test/README.md similarity index 100% rename from kasten/k10-minimum-retention/.chainsaw-test/README.md rename to kasten/kasten-minimum-retention/.chainsaw-test/README.md diff --git a/kasten/k10-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml similarity index 100% rename from kasten/k10-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-step-01-assert-1.yaml diff --git a/kasten/k10-minimum-retention/.chainsaw-test/chainsaw-test.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-test.yaml similarity index 87% rename from kasten/k10-minimum-retention/.chainsaw-test/chainsaw-test.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-test.yaml index a65611901..fbed4e96a 100755 --- a/kasten/k10-minimum-retention/.chainsaw-test/chainsaw-test.yaml +++ b/kasten/kasten-minimum-retention/.chainsaw-test/chainsaw-test.yaml @@ -3,7 +3,7 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: creationTimestamp: null - name: k10-minimum-retention + name: kasten-minimum-retention spec: steps: - name: step-01 @@ -15,7 +15,7 @@ spec: - apply: file: ns.yaml - apply: - file: ../k10-minimum-retention.yaml + file: ../kasten-minimum-retention.yaml - assert: file: policy-ready.yaml - name: step-03 diff --git a/kasten/k10-minimum-retention/.chainsaw-test/kuttlresource.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/kuttlresource.yaml similarity index 95% rename from kasten/k10-minimum-retention/.chainsaw-test/kuttlresource.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/kuttlresource.yaml index 
9f139aaba..7bb8254fc 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/kuttlresource.yaml +++ b/kasten/kasten-minimum-retention/.chainsaw-test/kuttlresource.yaml @@ -2,7 +2,7 @@ apiVersion: config.kio.kasten.io/v1alpha1 kind: Policy metadata: name: hourly-policy - namespace: k10-minimum-retention + namespace: kasten-minimum-retention labels: appPriority: Mission-Critical spec: diff --git a/kasten/k10-minimum-retention/.chainsaw-test/ns.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/ns.yaml similarity index 56% rename from kasten/k10-minimum-retention/.chainsaw-test/ns.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/ns.yaml index 6ff7e7310..9fdbec7b2 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/ns.yaml +++ b/kasten/kasten-minimum-retention/.chainsaw-test/ns.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: k10-minimum-retention \ No newline at end of file + name: kasten-minimum-retention \ No newline at end of file diff --git a/kasten/kasten-minimum-retention/.chainsaw-test/policy-ready.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..927882ff9 --- /dev/null +++ b/kasten/kasten-minimum-retention/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,9 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: kasten-minimum-retention +status: + conditions: + - reason: Succeeded + status: "True" + type: Ready \ No newline at end of file diff --git a/kasten/k10-minimum-retention/.chainsaw-test/resource-mutated.yaml b/kasten/kasten-minimum-retention/.chainsaw-test/resource-mutated.yaml similarity index 95% rename from kasten/k10-minimum-retention/.chainsaw-test/resource-mutated.yaml rename to kasten/kasten-minimum-retention/.chainsaw-test/resource-mutated.yaml index 171754953..569de9cf8 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/resource-mutated.yaml +++ b/kasten/kasten-minimum-retention/.chainsaw-test/resource-mutated.yaml @@ -5,7 +5,7 
@@ metadata: labels: appPriority: Mission-Critical name: hourly-policy - namespace: k10-minimum-retention + namespace: kasten-minimum-retention spec: actions: - action: backup diff --git a/kasten/k10-minimum-retention/.kyverno-test/backup-export-policy.yaml b/kasten/kasten-minimum-retention/.kyverno-test/kasten-hourly-policy.yaml similarity index 76% rename from kasten/k10-minimum-retention/.kyverno-test/backup-export-policy.yaml rename to kasten/kasten-minimum-retention/.kyverno-test/kasten-hourly-policy.yaml index 6abb9a37f..94db9932e 100644 --- a/kasten/k10-minimum-retention/.kyverno-test/backup-export-policy.yaml +++ b/kasten/kasten-minimum-retention/.kyverno-test/kasten-hourly-policy.yaml @@ -1,4 +1,3 @@ -# An example compliant K10 Policy apiVersion: config.kio.kasten.io/v1alpha1 kind: Policy metadata: @@ -8,7 +7,7 @@ metadata: appPriority: Mission-Critical spec: comment: My sample custom backup policy - frequency: '@hourly' # change this to @daily to test the 'audit_mission_critical_RPO' policy + frequency: '@hourly' subFrequency: minutes: [30] hours: [22,7] @@ -20,7 +19,7 @@ spec: monthly: 6 actions: - action: backup - - action: export # comment this line out to test 'enforce_3-2-1' policy + - action: export exportParameters: frequency: '@monthly' profile: diff --git a/kasten/kasten-minimum-retention/.kyverno-test/kasten-skipped-policies.yaml b/kasten/kasten-minimum-retention/.kyverno-test/kasten-skipped-policies.yaml new file mode 100644 index 000000000..73f735abc --- /dev/null +++ b/kasten/kasten-minimum-retention/.kyverno-test/kasten-skipped-policies.yaml @@ -0,0 +1,33 @@ +apiVersion: config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: preset-policy + namespace: kasten-io + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + presetRef: + name: mypreset + namespace: kasten-io + actions: + - action: backup + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp +--- +apiVersion: 
config.kio.kasten.io/v1alpha1 +kind: Policy +metadata: + name: ondemand-policy + namespace: kasten-io + labels: + appPriority: Mission-Critical +spec: + comment: My sample custom backup policy + frequency: '@onDemand' + actions: + - action: backup + selector: + matchLabels: + k10.kasten.io/appNamespace: sampleApp \ No newline at end of file diff --git a/kasten/kasten-minimum-retention/.kyverno-test/kyverno-test.yaml b/kasten/kasten-minimum-retention/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..606bdee55 --- /dev/null +++ b/kasten/kasten-minimum-retention/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,29 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: kyverno_data_protection_tests +policies: +- ../kasten-minimum-retention.yaml +resources: +- kasten-skipped-policies.yaml +- kasten-hourly-policy.yaml +results: +- kind: Policy + patchedResource: patched.yaml + policy: kasten-minimum-retention + resources: + - hourly-policy + result: pass + rule: kasten-minimum-retention +- kind: Policy + policy: kasten-minimum-retention + resources: + - ondemand-policy + result: skip + rule: kasten-minimum-retention +- kind: Policy + policy: kasten-minimum-retention + resources: + - preset-policy + result: skip + rule: kasten-minimum-retention diff --git a/kasten/k10-minimum-retention/.kyverno-test/patched.yaml b/kasten/kasten-minimum-retention/.kyverno-test/patched.yaml similarity index 100% rename from kasten/k10-minimum-retention/.kyverno-test/patched.yaml rename to kasten/kasten-minimum-retention/.kyverno-test/patched.yaml diff --git a/kasten/kasten-minimum-retention/artifacthub-pkg.yml b/kasten/kasten-minimum-retention/artifacthub-pkg.yml new file mode 100644 index 000000000..51b74f6d0 --- /dev/null +++ b/kasten/kasten-minimum-retention/artifacthub-pkg.yml @@ -0,0 +1,20 @@ +name: kasten-minimum-retention +version: 1.0.1 +displayName: Set Kasten Policy Minimum Backup Retention +createdAt: "2023-05-07T00:00:00.000Z" +description: >- + 
Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment (1) of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kasten/kasten-minimum-retention/kasten-minimum-retention.yaml + ``` +keywords: + - kyverno + - Veeam Kasten +readme: | + Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment (1) of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. 
Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type +annotations: + kyverno/category: "Veeam Kasten" + kyverno/kubernetesVersion: "1.24-1.30" + kyverno/subject: "Policy" +digest: e394e005816521b6157a1ef4a0c9757bca956dd706f6a82746fe661c7938d61f diff --git a/kasten/k10-minimum-retention/k10-minimum-retention.yaml b/kasten/kasten-minimum-retention/kasten-minimum-retention.yaml similarity index 55% rename from kasten/k10-minimum-retention/k10-minimum-retention.yaml rename to kasten/kasten-minimum-retention/kasten-minimum-retention.yaml index a7535c298..1a21d7c81 100644 --- a/kasten/k10-minimum-retention/k10-minimum-retention.yaml +++ b/kasten/kasten-minimum-retention/kasten-minimum-retention.yaml @@ -1,30 +1,38 @@ apiVersion: kyverno.io/v1 kind: ClusterPolicy metadata: - name: k10-minimum-retention + name: kasten-minimum-retention annotations: - policies.kyverno.io/title: Minimum Backup Retention - policies.kyverno.io/category: Kasten K10 by Veeam - kyverno.io/kyverno-version: 1.6.2 + policies.kyverno.io/title: Set Kasten Policy Minimum Backup Retention + policies.kyverno.io/category: Veeam Kasten + kyverno.io/kyverno-version: 1.12.1 policies.kyverno.io/minversion: 1.6.2 - kyverno.io/kubernetes-version: "1.21-1.22" + kyverno.io/kubernetes-version: "1.24-1.30" policies.kyverno.io/subject: Policy policies.kyverno.io/description: >- - K10 Policy resources can be validated to adhere to common compliance retention standards. - Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. - This policy deletes the retention value in the backup operation and replaces it with the specified retention. - Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. - To use different - This policy can also be used go reduce retentions lengths to enforce cost optimization. 
+ Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment (1) of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type spec: schemaValidation: false rules: - - name: k10-minimum-retention + - name: kasten-minimum-retention match: any: - resources: kinds: - config.kio.kasten.io/v1alpha1/Policy + preconditions: + all: + # Match only @hourly policies that do not use policy presets, as the + # number of retained artifacts can only be specified for frequencies + # of the same or lower granularity than the policy frequency. For example, + # if the policy frequency is '@daily', then retention can have values for + # 'daily', 'weekly', 'monthly' and 'yearly', but not for 'hourly'. + # If the policy frequency is 'hourly', then all retention values are + # allowed. If the policy frequency is '@onDemand' or policy preset is used + # then retention values are not allowed. 
+ - key: "{{ request.object.spec.frequency || ''}}" + operator: Equals + value: '@hourly' mutate: # Federal Information Security Management Act (FISMA): 3 Years #patchesJson6902: |- diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/chainsaw-test.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..f948a1a98 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-kubecost-labels +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-kubecost-labels.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-kubecost-labels + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-bad.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..68e1fe0a1 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + env: foo + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + owner: foo + team: bar + department: foo + 
app: bar + name: badpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-good.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..50ec73c0a --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/pod-good.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + foo: bar + owner: foo + team: bar + department: foo + app: bar + env: foo + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..44277f3ba --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + owner: "foo" + template: + metadata: + labels: + owner: "foo" + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + owner: "foo" + team: "foo" + spec: + 
containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-good.yaml b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..4e85726df --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,99 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + owner: "foo" + team: "foo" + department: "foo" + env: "foo" + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + owner: "foo" + team: "foo" + department: "foo" + env: "foo" + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + owner: "foo" + team: "foo" + department: "foo" + app: "foo" + env: "foo" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + owner: "foo" + team: "foo" + department: "foo" + app: "foo" + env: "foo" + foo: bar + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/kubecost-cel/require-kubecost-labels/.chainsaw-test/policy-ready.yaml 
b/kubecost-cel/require-kubecost-labels/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..ed44c7cf3 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-kubecost-labels +status: + ready: true diff --git a/kubecost-cel/require-kubecost-labels/.kyverno-test/kyverno-test.yaml b/kubecost-cel/require-kubecost-labels/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..41e2fbf5f --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,25 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-kubecost-labels +policies: +- ../require-kubecost-labels.yaml +resources: +- resource.yaml +results: +- kind: Pod + policy: require-kubecost-labels + resources: + - badpod01 + - badpod02 + - badpod03 + - badpod04 + - badpod05 + result: fail + rule: require-labels +- kind: Pod + policy: require-kubecost-labels + resources: + - goodpod + result: pass + rule: require-labels diff --git a/kubecost-cel/require-kubecost-labels/.kyverno-test/resource.yaml b/kubecost-cel/require-kubecost-labels/.kyverno-test/resource.yaml new file mode 100644 index 000000000..17ab732f0 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/.kyverno-test/resource.yaml @@ -0,0 +1,73 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod + labels: + owner: John Doe + team: falcon + department: eng + app: redis + env: prod2 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + labels: + owner: John Doe +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + labels: + owner: John Doe + team: falcon +spec: + containers: + - image: 
busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 + labels: + owner: John Doe + team: falcon + department: eng +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 + labels: + owner: John Doe + team: falcon + department: eng + app: redis +spec: + containers: + - image: busybox:1.35 + name: busybox diff --git a/kubecost-cel/require-kubecost-labels/artifacthub-pkg.yml b/kubecost-cel/require-kubecost-labels/artifacthub-pkg.yml new file mode 100644 index 000000000..bfae83d11 --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: require-kubecost-labels-cel +version: 1.0.0 +displayName: Require Kubecost Labels in CEL expressions +description: >- + Kubecost can use labels assigned to Pods in order to track and display cost allocation in a granular way. These labels, which can be customized, can be used to organize and group workloads in different ways. This policy requires that the labels `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. With Kyverno autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), these labels will also be required for all Pod controllers. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml + ``` +keywords: + - kyverno + - Kubecost + - CEL Expressions +readme: | + Kubecost can use labels assigned to Pods in order to track and display cost allocation in a granular way. These labels, which can be customized, can be used to organize and group workloads in different ways. This policy requires that the labels `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. 
With Kyverno autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), these labels will also be required for all Pod controllers. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Kubecost in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod,Label" +digest: e7dc12ab8d4fa467c23bc117db5c9e33e5e0d804c597ee0d88fb9f55f11ab535 +createdAt: "2024-05-12T06:59:59Z" + diff --git a/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml b/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml new file mode 100644 index 000000000..32ca0dccb --- /dev/null +++ b/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml @@ -0,0 +1,43 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-kubecost-labels + annotations: + policies.kyverno.io/title: Require Kubecost Labels in CEL expressions + policies.kyverno.io/category: Kubecost in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod, Label + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Kubecost can use labels assigned to Pods in order to track and display + cost allocation in a granular way. These labels, which can be customized, can be used + to organize and group workloads in different ways. This policy requires that the labels + `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. With Kyverno + autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), + these labels will also be required for all Pod controllers. 
+spec: + validationFailureAction: Audit + background: true + rules: + - name: require-labels + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.metadata.labels) && + has(object.metadata.labels.owner) && object.metadata.labels.owner != '' && + has(object.metadata.labels.team) && object.metadata.labels.team != '' && + has(object.metadata.labels.department) && object.metadata.labels.department != '' && + has(object.metadata.labels.app) && object.metadata.labels.app != '' && + has(object.metadata.labels.env) && object.metadata.labels.env != '' + message: "The Kubecost labels `owner`, `team`, `department`, `app`, and `env` are all required for Pods." + diff --git a/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml b/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml index d8db785f5..44277f3ba 100644 --- a/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml +++ b/kubecost/require-kubecost-labels/.chainsaw-test/podcontroller-bad.yaml @@ -8,7 +8,7 @@ spec: replicas: 1 selector: matchLabels: - app: busybox + foo: bar template: metadata: labels: @@ -28,7 +28,7 @@ spec: replicas: 1 selector: matchLabels: - app: busybox + owner: "foo" template: metadata: labels: diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/bad-pod.yaml b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/bad-pod.yaml new file mode 100644 index 000000000..2ffa3ce71 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/bad-pod.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + linkerd.io/inject: disabled + name: badpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/bad-podcontrollers.yaml 
b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/bad-podcontrollers.yaml new file mode 100644 index 000000000..0ef55981b --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/bad-podcontrollers.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + annotations: + linkerd.io/inject: disabled + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + linkerd.io/inject: disabled + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/chainsaw-test.yaml b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..9051a94fb --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: prevent-linkerd-pod-injection-override +spec: + steps: + - name: step-01 + try: + - apply: + file: ../prevent-linkerd-pod-injection-override.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: prevent-linkerd-pod-injection-override + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: good-pod.yaml + - apply: + file: good-podcontrollers.yaml + - apply: + expect: + - 
check: + ($error != null): true + file: bad-pod.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-podcontrollers.yaml diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/good-pod.yaml b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/good-pod.yaml new file mode 100644 index 000000000..826bd837a --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/good-pod.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + linkerd.io/inject: enabled + name: goodpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: {} \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/good-podcontrollers.yaml b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/good-podcontrollers.yaml new file mode 100644 index 000000000..876c42be2 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/good-podcontrollers.yaml @@ -0,0 +1,83 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + annotations: + linkerd.io/inject: enabled + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: hello + image: 
busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + linkerd.io/inject: enabled + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/policy-ready.yaml b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..2d21edf25 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-linkerd-pod-injection-override +status: + ready: true diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/.kyverno-test/kyverno-test.yaml b/linkerd-cel/prevent-linkerd-pod-injection-override/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..123a46aa0 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,51 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-linkerd-pod-injection-override +policies: +- ../prevent-linkerd-pod-injection-override.yaml +resources: +- ../.chainsaw-test/bad-pod.yaml +- ../.chainsaw-test/bad-podcontrollers.yaml +- ../.chainsaw-test/good-pod.yaml +- ../.chainsaw-test/good-podcontrollers.yaml +results: +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Pod + resources: + - badpod01 + result: fail +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Deployment + resources: + - baddeploy01 + result: fail +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: CronJob + resources: + - 
badcronjob01 + result: fail +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/artifacthub-pkg.yml b/linkerd-cel/prevent-linkerd-pod-injection-override/artifacthub-pkg.yml new file mode 100644 index 000000000..41668ea02 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: prevent-linkerd-pod-injection-override-cel +version: 1.0.0 +displayName: Prevent Linkerd Pod Injection Override in CEL expressions +description: >- + Setting the annotation on a Pod (or its controller) `linkerd.io/inject` to `disabled` may effectively disable mesh participation for that workload reducing security and visibility. This policy prevents setting the annotation `linkerd.io/inject` to `disabled` for Pods. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/linkerd-cel/prevent-linkerd-pod-injection-override/prevent-linkerd-pod-injection-override.yaml + ``` +keywords: + - kyverno + - Linkerd + - CEL Expressions +readme: | + Setting the annotation on a Pod (or its controller) `linkerd.io/inject` to `disabled` may effectively disable mesh participation for that workload reducing security and visibility. This policy prevents setting the annotation `linkerd.io/inject` to `disabled` for Pods. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Linkerd in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 795a7d5ae06f9720bdbcc00ced965d1f7a28540c965628a47abc5621fb8d0033 +createdAt: "2024-05-21T15:39:18Z" diff --git a/linkerd-cel/prevent-linkerd-pod-injection-override/prevent-linkerd-pod-injection-override.yaml b/linkerd-cel/prevent-linkerd-pod-injection-override/prevent-linkerd-pod-injection-override.yaml new file mode 100644 index 000000000..cbed7f953 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-pod-injection-override/prevent-linkerd-pod-injection-override.yaml @@ -0,0 +1,35 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-linkerd-pod-injection-override + annotations: + policies.kyverno.io/title: Prevent Linkerd Pod Injection Override in CEL expressions + policies.kyverno.io/category: Linkerd in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Setting the annotation on a Pod (or its controller) `linkerd.io/inject` to + `disabled` may effectively disable mesh participation for that workload reducing + security and visibility. This policy prevents setting the annotation `linkerd.io/inject` + to `disabled` for Pods. +spec: + validationFailureAction: Audit + background: true + rules: + - name: pod-injection-override + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.metadata.annotations) || !('linkerd.io/inject' in object.metadata.annotations) || object.metadata.annotations['linkerd.io/inject'] != 'disabled'" + message: "Pods may not disable sidecar injection." 
+ diff --git a/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/bad-pod.yaml b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/bad-pod.yaml new file mode 100644 index 000000000..1d3b1cbc3 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/bad-pod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + foo: bar + config.linkerd.io/skip-inbound-ports: "true" + name: badpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + annotations: + config.linkerd.io/skip-outbound-ports: "true" + foo: bar + name: badpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + annotations: + foo: bar + config.linkerd.io/skip-outbound-ports: "true" + config.linkerd.io/skip-inbound-ports: "true" + name: badpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/bad-podcontrollers.yaml b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/bad-podcontrollers.yaml new file mode 100644 index 000000000..3d4093257 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/bad-podcontrollers.yaml @@ -0,0 +1,136 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + annotations: + foo: bar + config.linkerd.io/skip-inbound-ports: "true" + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + annotations: + config.linkerd.io/skip-outbound-ports: "true" + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 
+--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy03 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + annotations: + foo: bar + config.linkerd.io/skip-inbound-ports: "true" + config.linkerd.io/skip-outbound-ports: "true" + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + config.linkerd.io/skip-outbound-ports: "true" + foo: bar + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + foo: bar + config.linkerd.io/skip-inbound-ports: "true" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob03 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + foo: bar + config.linkerd.io/skip-outbound-ports: "true" + config.linkerd.io/skip-inbound-ports: "true" + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/chainsaw-test.yaml b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..c85c17614 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: 
chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: prevent-linkerd-port-skipping +spec: + steps: + - name: step-01 + try: + - apply: + file: ../prevent-linkerd-port-skipping.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: prevent-linkerd-port-skipping + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: good-pod.yaml + - apply: + file: good-podcontrollers.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-pod.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-podcontrollers.yaml diff --git a/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/good-pod.yaml b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/good-pod.yaml new file mode 100644 index 000000000..feddec893 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/good-pod.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + annotations: + foo: bar + name: goodpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: {} \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/good-podcontrollers.yaml b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/good-podcontrollers.yaml new file mode 100644 index 000000000..e8c4c6706 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/good-podcontrollers.yaml @@ -0,0 +1,83 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: 
busybox + name: gooddeploy02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + annotations: + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + annotations: + foo: bar + spec: + containers: + - name: hello + image: busybox:1.35 + command: + - "sleep" + - "3600" + restartPolicy: OnFailure \ No newline at end of file diff --git a/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/policy-ready.yaml b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..538df5440 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-linkerd-port-skipping +status: + ready: true diff --git a/linkerd-cel/prevent-linkerd-port-skipping/.kyverno-test/kyverno-test.yaml b/linkerd-cel/prevent-linkerd-port-skipping/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..3eef768b6 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,58 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-linkerd-port-skipping +policies: +- ../prevent-linkerd-port-skipping.yaml +resources: +- ../.chainsaw-test/bad-pod.yaml +- ../.chainsaw-test/bad-podcontrollers.yaml +- ../.chainsaw-test/good-pod.yaml +- ../.chainsaw-test/good-podcontrollers.yaml +results: +- policy: prevent-linkerd-port-skipping + 
rule: pod-prevent-port-skipping + kind: Pod + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: Deployment + resources: + - baddeploy01 + - baddeploy02 + - baddeploy03 + result: fail +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: CronJob + resources: + - badcronjob01 + - badcronjob02 + - badcronjob03 + result: fail +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass + diff --git a/linkerd-cel/prevent-linkerd-port-skipping/artifacthub-pkg.yml b/linkerd-cel/prevent-linkerd-port-skipping/artifacthub-pkg.yml new file mode 100644 index 000000000..4ab092a6c --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: prevent-linkerd-port-skipping-cel +version: 1.0.0 +displayName: Prevent Linkerd Port Skipping in CEL expressions +description: >- + Linkerd has the ability to skip inbound and outbound ports assigned to Pods, exempting them from mTLS. This can be important in some narrow use cases but generally should be avoided. This policy prevents Pods from setting the annotations `config.linkerd.io/skip-inbound-ports` or `config.linkerd.io/skip-outbound-ports`. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/linkerd-cel/prevent-linkerd-port-skipping/prevent-linkerd-port-skipping.yaml + ``` +keywords: + - kyverno + - Linkerd + - CEL Expressions +readme: | + Linkerd has the ability to skip inbound and outbound ports assigned to Pods, exempting them from mTLS. This can be important in some narrow use cases but generally should be avoided. This policy prevents Pods from setting the annotations `config.linkerd.io/skip-inbound-ports` or `config.linkerd.io/skip-outbound-ports`. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Linkerd in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: bb047cee4e04c6939ccbdafe386dc0a84ea7e7242fe476a67ab6daf93dbec98f +createdAt: "2024-05-21T15:57:57Z" diff --git a/linkerd-cel/prevent-linkerd-port-skipping/prevent-linkerd-port-skipping.yaml b/linkerd-cel/prevent-linkerd-port-skipping/prevent-linkerd-port-skipping.yaml new file mode 100644 index 000000000..d95aca938 --- /dev/null +++ b/linkerd-cel/prevent-linkerd-port-skipping/prevent-linkerd-port-skipping.yaml @@ -0,0 +1,37 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-linkerd-port-skipping + annotations: + policies.kyverno.io/title: Prevent Linkerd Port Skipping in CEL expressions + policies.kyverno.io/category: Linkerd in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Linkerd has the ability to skip inbound and outbound ports assigned to Pods, exempting + them from mTLS. This can be important in some narrow use cases but + generally should be avoided. 
This policy prevents Pods from setting + the annotations `config.linkerd.io/skip-inbound-ports` or `config.linkerd.io/skip-outbound-ports`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: pod-prevent-port-skipping + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.metadata.annotations) || + (!('config.linkerd.io/skip-inbound-ports' in object.metadata.annotations) && !('config.linkerd.io/skip-outbound-ports' in object.metadata.annotations)) + message: "Pods may not skip ports. The annotations `config.linkerd.io/skip-inbound-ports` or `config.linkerd.io/skip-outbound-ports` must not be set." + diff --git a/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/bad-ns.yaml b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/bad-ns.yaml new file mode 100644 index 000000000..211682121 --- /dev/null +++ b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/bad-ns.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + foo: bar + linkerd.io/inject: disabled + name: ld-meshinj-badns01 +--- +apiVersion: v1 +kind: Namespace +metadata: + annotations: + foo: bar + name: ld-meshinj-badns02 +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ld-meshinj-badns03 \ No newline at end of file diff --git a/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/chainsaw-test.yaml b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..84d387eca --- /dev/null +++ b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-linkerd-mesh-injection +spec: + steps: + - name: step-01 + 
try: + - apply: + file: ../require-linkerd-mesh-injection.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-linkerd-mesh-injection + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: good-ns.yaml + - apply: + expect: + - check: + ($error != null): true + file: bad-ns.yaml diff --git a/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/good-ns.yaml b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/good-ns.yaml new file mode 100644 index 000000000..649948782 --- /dev/null +++ b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/good-ns.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + foo: bar + linkerd.io/inject: enabled + name: ld-meshinj-goodns01 +--- +apiVersion: v1 +kind: Namespace +metadata: + annotations: + linkerd.io/inject: enabled + foo: bar + name: ld-meshinj-goodns02 \ No newline at end of file diff --git a/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/policy-ready.yaml b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..0a6966bd9 --- /dev/null +++ b/linkerd-cel/require-linkerd-mesh-injection/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-linkerd-mesh-injection +status: + ready: true diff --git a/linkerd-cel/require-linkerd-mesh-injection/.kyverno-test/kyverno-test.yaml b/linkerd-cel/require-linkerd-mesh-injection/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..8c009f0b0 --- /dev/null +++ b/linkerd-cel/require-linkerd-mesh-injection/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,26 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-linkerd-mesh-injection +policies: +- ../require-linkerd-mesh-injection.yaml +resources: +- ../.chainsaw-test/bad-ns.yaml +- 
../.chainsaw-test/good-ns.yaml +results: +- policy: require-linkerd-mesh-injection + rule: require-mesh-annotation + kind: Namespace + resources: + - ld-meshinj-badns01 + - ld-meshinj-badns02 + - ld-meshinj-badns03 + result: fail +- policy: require-linkerd-mesh-injection + rule: require-mesh-annotation + kind: Namespace + resources: + - ld-meshinj-goodns01 + - ld-meshinj-goodns02 + result: pass + diff --git a/linkerd-cel/require-linkerd-mesh-injection/artifacthub-pkg.yml b/linkerd-cel/require-linkerd-mesh-injection/artifacthub-pkg.yml new file mode 100644 index 000000000..647ab852c --- /dev/null +++ b/linkerd-cel/require-linkerd-mesh-injection/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: require-linkerd-mesh-injection-cel +version: 1.0.0 +displayName: Require Linkerd Mesh Injection in CEL expressions +description: >- + Sidecar proxy injection in Linkerd may be handled at the Namespace level by setting the annotation `linkerd.io/inject` to `enabled`. This policy enforces that all Namespaces contain the annotation `linkerd.io/inject` set to `enabled`. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/linkerd-cel/require-linkerd-mesh-injection/require-linkerd-mesh-injection.yaml + ``` +keywords: + - kyverno + - Linkerd + - CEL Expressions +readme: | + Sidecar proxy injection in Linkerd may be handled at the Namespace level by setting the annotation `linkerd.io/inject` to `enabled`. This policy enforces that all Namespaces contain the annotation `linkerd.io/inject` set to `enabled`. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Linkerd in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace, Annotation" +digest: 54785b725fde31418dffca17c8b9eb619c64db8351743d370b5f628e5235fd93 +createdAt: "2024-05-21T16:06:15Z" diff --git a/linkerd-cel/require-linkerd-mesh-injection/require-linkerd-mesh-injection.yaml b/linkerd-cel/require-linkerd-mesh-injection/require-linkerd-mesh-injection.yaml new file mode 100644 index 000000000..d05c38ec6 --- /dev/null +++ b/linkerd-cel/require-linkerd-mesh-injection/require-linkerd-mesh-injection.yaml @@ -0,0 +1,34 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-linkerd-mesh-injection + annotations: + policies.kyverno.io/title: Require Linkerd Mesh Injection in CEL expressions + policies.kyverno.io/category: Linkerd in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Namespace, Annotation + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Sidecar proxy injection in Linkerd may be handled at the Namespace level by + setting the annotation `linkerd.io/inject` to `enabled`. This policy enforces that + all Namespaces contain the annotation `linkerd.io/inject` set to `enabled`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: require-mesh-annotation + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.metadata.annotations) && 'linkerd.io/inject' in object.metadata.annotations && object.metadata.annotations['linkerd.io/inject'] == 'enabled'" + message: "All Namespaces must set the annotation `linkerd.io/inject` to `enabled`." 
+ diff --git a/linkerd/prevent-linkerd-pod-injection-override/.chainsaw-test/good-podcontrollers.yaml b/linkerd/prevent-linkerd-pod-injection-override/.chainsaw-test/good-podcontrollers.yaml index 119385e9d..876c42be2 100644 --- a/linkerd/prevent-linkerd-pod-injection-override/.chainsaw-test/good-podcontrollers.yaml +++ b/linkerd/prevent-linkerd-pod-injection-override/.chainsaw-test/good-podcontrollers.yaml @@ -49,6 +49,9 @@ spec: jobTemplate: spec: template: + metadata: + labels: + app: busybox spec: containers: - name: hello diff --git a/linkerd/prevent-linkerd-pod-injection-override/.kyverno-test/kyverno-test.yaml b/linkerd/prevent-linkerd-pod-injection-override/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..123a46aa0 --- /dev/null +++ b/linkerd/prevent-linkerd-pod-injection-override/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,51 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-linkerd-pod-injection-override +policies: +- ../prevent-linkerd-pod-injection-override.yaml +resources: +- ../.chainsaw-test/bad-pod.yaml +- ../.chainsaw-test/bad-podcontrollers.yaml +- ../.chainsaw-test/good-pod.yaml +- ../.chainsaw-test/good-podcontrollers.yaml +results: +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Pod + resources: + - badpod01 + result: fail +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Deployment + resources: + - baddeploy01 + result: fail +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: CronJob + resources: + - badcronjob01 + result: fail +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: 
prevent-linkerd-pod-injection-override + rule: pod-injection-override + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass \ No newline at end of file diff --git a/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-pod.yaml b/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-pod.yaml index 730df5cbc..1d3b1cbc3 100644 --- a/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-pod.yaml +++ b/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-pod.yaml @@ -3,7 +3,7 @@ kind: Pod metadata: annotations: foo: bar - config.linkerd.io/skip-inbound-ports: true + config.linkerd.io/skip-inbound-ports: "true" name: badpod01 spec: containers: @@ -14,7 +14,7 @@ apiVersion: v1 kind: Pod metadata: annotations: - config.linkerd.io/skip-outbound-ports: true + config.linkerd.io/skip-outbound-ports: "true" foo: bar name: badpod02 spec: @@ -27,8 +27,8 @@ kind: Pod metadata: annotations: foo: bar - config.linkerd.io/skip-outbound-ports: true - config.linkerd.io/skip-inbound-ports: true + config.linkerd.io/skip-outbound-ports: "true" + config.linkerd.io/skip-inbound-ports: "true" name: badpod03 spec: containers: diff --git a/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-podcontrollers.yaml b/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-podcontrollers.yaml index f414b7893..3d4093257 100644 --- a/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-podcontrollers.yaml +++ b/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/bad-podcontrollers.yaml @@ -15,7 +15,7 @@ spec: app: busybox annotations: foo: bar - config.linkerd.io/skip-inbound-ports: true + config.linkerd.io/skip-inbound-ports: "true" spec: containers: - name: busybox @@ -37,7 +37,7 @@ spec: labels: app: busybox annotations: - config.linkerd.io/skip-outbound-ports: true + config.linkerd.io/skip-outbound-ports: "true" foo: bar spec: containers: @@ -61,8 +61,8 @@ spec: app: busybox annotations: foo: bar - config.linkerd.io/skip-inbound-ports: true - 
config.linkerd.io/skip-outbound-ports: true + config.linkerd.io/skip-inbound-ports: "true" + config.linkerd.io/skip-outbound-ports: "true" spec: containers: - name: busybox @@ -79,7 +79,7 @@ spec: template: metadata: annotations: - config.linkerd.io/skip-outbound-ports: true + config.linkerd.io/skip-outbound-ports: "true" foo: bar spec: containers: @@ -102,7 +102,7 @@ spec: metadata: annotations: foo: bar - config.linkerd.io/skip-inbound-ports: true + config.linkerd.io/skip-inbound-ports: "true" spec: containers: - name: hello @@ -124,8 +124,8 @@ spec: metadata: annotations: foo: bar - config.linkerd.io/skip-outbound-ports: true - config.linkerd.io/skip-inbound-ports: true + config.linkerd.io/skip-outbound-ports: "true" + config.linkerd.io/skip-inbound-ports: "true" spec: containers: - name: hello diff --git a/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/good-podcontrollers.yaml b/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/good-podcontrollers.yaml index d1605961a..e8c4c6706 100644 --- a/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/good-podcontrollers.yaml +++ b/linkerd/prevent-linkerd-port-skipping/.chainsaw-test/good-podcontrollers.yaml @@ -49,6 +49,9 @@ spec: jobTemplate: spec: template: + metadata: + labels: + app: busybox spec: containers: - name: hello diff --git a/linkerd/prevent-linkerd-port-skipping/.kyverno-test/kyverno-test.yaml b/linkerd/prevent-linkerd-port-skipping/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..3eef768b6 --- /dev/null +++ b/linkerd/prevent-linkerd-port-skipping/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,58 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-linkerd-port-skipping +policies: +- ../prevent-linkerd-port-skipping.yaml +resources: +- ../.chainsaw-test/bad-pod.yaml +- ../.chainsaw-test/bad-podcontrollers.yaml +- ../.chainsaw-test/good-pod.yaml +- ../.chainsaw-test/good-podcontrollers.yaml +results: +- policy: prevent-linkerd-port-skipping + rule: 
pod-prevent-port-skipping + kind: Pod + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: Deployment + resources: + - baddeploy01 + - baddeploy02 + - baddeploy03 + result: fail +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: CronJob + resources: + - badcronjob01 + - badcronjob02 + - badcronjob03 + result: fail +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: Deployment + resources: + - gooddeploy01 + - gooddeploy02 + result: pass +- policy: prevent-linkerd-port-skipping + rule: pod-prevent-port-skipping + kind: CronJob + resources: + - goodcronjob01 + - goodcronjob02 + result: pass + diff --git a/linkerd/require-linkerd-mesh-injection/.kyverno-test/kyverno-test.yaml b/linkerd/require-linkerd-mesh-injection/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..8c009f0b0 --- /dev/null +++ b/linkerd/require-linkerd-mesh-injection/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,26 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-linkerd-mesh-injection +policies: +- ../require-linkerd-mesh-injection.yaml +resources: +- ../.chainsaw-test/bad-ns.yaml +- ../.chainsaw-test/good-ns.yaml +results: +- policy: require-linkerd-mesh-injection + rule: require-mesh-annotation + kind: Namespace + resources: + - ld-meshinj-badns01 + - ld-meshinj-badns02 + - ld-meshinj-badns03 + result: fail +- policy: require-linkerd-mesh-injection + rule: require-mesh-annotation + kind: Namespace + resources: + - ld-meshinj-goodns01 + - ld-meshinj-goodns02 + result: pass + diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/chainsaw-test.yaml 
b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..f8d331033 --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,30 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: disallow-ingress-nginx-custom-snippets +spec: + steps: + - name: step-01 + try: + - apply: + file: ../disallow-ingress-nginx-custom-snippets.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: cm-good.yaml + - apply: + file: ig-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: cm-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: ig-bad.yaml diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/cm-bad.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/cm-bad.yaml new file mode 100644 index 000000000..177ac0678 --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/cm-bad.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +data: + allow-snippet-annotations: "true" +kind: ConfigMap +metadata: + name: config-map-true diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/cm-good.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/cm-good.yaml new file mode 100644 index 000000000..6ec1541ac --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/cm-good.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +data: + allow-snippet-annotations: "false" +kind: ConfigMap +metadata: + name: config-map-false +--- +apiVersion: v1 +data: + random: "someval" +kind: ConfigMap +metadata: + name: config-map-other +--- +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: config-map-empty \ No newline at end of file diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/ig-bad.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/ig-bad.yaml new file mode 100644 index 000000000..82c289e7c --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/ig-bad.yaml @@ -0,0 +1,63 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress-with-snippets + annotations: + foo: bar + nginx.org/server-snippet: | + location / { + return 302 /coffee; + } + nginx.org/location-snippet: | + add_header my-test-header test-value; +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress + annotations: + nginx.org/server-snippet: | + location / { + return 302 /coffee; + } + nginx.org/location-snippet: | + add_header my-test-header test-value; + foo: bar +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/ig-good.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/ig-good.yaml new file mode 100644 index 000000000..37a7cec08 --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/ig-good.yaml @@ -0,0 +1,50 @@ + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress-with-snippets + annotations: + foo: bar +spec: + rules: + - host: cafe.example.com + http: + 
paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/policy-ready.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..8419e2c67 --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-ingress-nginx-custom-snippets +status: + ready: true \ No newline at end of file diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.kyverno-test/kyverno-test.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..d668087bf --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,35 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: disallow_nginx_custom_snippets +policies: +- ../disallow-ingress-nginx-custom-snippets.yaml +resources: +- resources.yaml +results: +- kind: ConfigMap + policy: disallow-ingress-nginx-custom-snippets + resources: + - config-map-true + result: fail + rule: check-config-map +- kind: ConfigMap + policy: disallow-ingress-nginx-custom-snippets + resources: + - config-map-false + - config-map-other + - config-map-empty + result: pass + rule: check-config-map +- kind: Ingress 
+ policy: disallow-ingress-nginx-custom-snippets + resources: + - cafe-ingress-with-snippets + result: fail + rule: check-ingress-annotations +- kind: Ingress + policy: disallow-ingress-nginx-custom-snippets + resources: + - cafe-ingress + result: pass + rule: check-ingress-annotations diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.kyverno-test/resources.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.kyverno-test/resources.yaml new file mode 100644 index 000000000..062f1f953 --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/.kyverno-test/resources.yaml @@ -0,0 +1,81 @@ +--- +apiVersion: v1 +data: + allow-snippet-annotations: "false" +kind: ConfigMap +metadata: + name: config-map-false +--- +apiVersion: v1 +data: + allow-snippet-annotations: "true" +kind: ConfigMap +metadata: + name: config-map-true +--- +apiVersion: v1 +data: + random: "someval" +kind: ConfigMap +metadata: + name: config-map-other +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-map-empty +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress-with-snippets + annotations: + nginx.org/server-snippet: | + location / { + return 302 /coffee; + } + nginx.org/location-snippet: | + add_header my-test-header test-value; +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 \ No newline at end of file diff --git 
a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/artifacthub-pkg.yml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/artifacthub-pkg.yml new file mode 100644 index 000000000..1aa414fe7 --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: disallow-ingress-nginx-custom-snippets-cel +version: 1.0.0 +displayName: Disallow Custom Snippets in CEL expressions +description: >- + Users that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (CVE-2021-25742). This policy disables allow-snippet-annotations in the ingress-nginx configuration and blocks *-snippet annotations on an Ingress. See: https://github.com/kubernetes/ingress-nginx/issues/7837 +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/disallow-ingress-nginx-custom-snippets.yaml + ``` +keywords: + - kyverno + - Security + - NGINX Ingress + - CEL Expressions +readme: | + Users that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (CVE-2021-25742). This policy disables allow-snippet-annotations in the ingress-nginx configuration and blocks *-snippet annotations on an Ingress. 
See: https://github.com/kubernetes/ingress-nginx/issues/7837 + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Security, NGINX Ingress in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "ConfigMap, Ingress" +digest: aaf1d6d140eb40ced231f9b1c1e58c76eb89c1974def85df5f0152b72b8d398b +createdAt: "2024-05-21T16:14:12Z" diff --git a/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/disallow-ingress-nginx-custom-snippets.yaml b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/disallow-ingress-nginx-custom-snippets.yaml new file mode 100644 index 000000000..e7a098251 --- /dev/null +++ b/nginx-ingress-cel/disallow-ingress-nginx-custom-snippets/disallow-ingress-nginx-custom-snippets.yaml @@ -0,0 +1,49 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-ingress-nginx-custom-snippets + annotations: + policies.kyverno.io/title: Disallow Custom Snippets in CEL expressions + policies.kyverno.io/category: Security, NGINX Ingress in CEL + policies.kyverno.io/subject: ConfigMap, Ingress + policies.kyverno.io/minversion: "1.11.0" + kyverno.io/kyverno-version: "1.11.0" + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Users that can create or update ingress objects can use the custom snippets + feature to obtain all secrets in the cluster (CVE-2021-25742). This policy + disables allow-snippet-annotations in the ingress-nginx configuration and + blocks *-snippet annotations on an Ingress. 
+ See: https://github.com/kubernetes/ingress-nginx/issues/7837 +spec: + validationFailureAction: Enforce + rules: + - name: check-config-map + match: + any: + - resources: + kinds: + - ConfigMap + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.data) || !('allow-snippet-annotations' in object.data) || object.data['allow-snippet-annotations'] == 'false'" + message: "ingress-nginx allow-snippet-annotations must be set to false" + - name: check-ingress-annotations + match: + any: + - resources: + kinds: + - networking.k8s.io/v1/Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.metadata.annotations) || !object.metadata.annotations.exists(annotation, annotation.endsWith('-snippet'))" + message: "ingress-nginx custom snippets are not allowed" + diff --git a/nginx-ingress-cel/restrict-annotations/.chainsaw-test/chainsaw-test.yaml b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..587740382 --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-annotations +spec: + steps: + - name: step-01 + try: + - apply: + file: ../restrict-annotations.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ig-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ig-bad.yaml diff --git a/nginx-ingress-cel/restrict-annotations/.chainsaw-test/ig-bad.yaml b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/ig-bad.yaml new file mode 100644 index 000000000..fd98e54ca --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/ig-bad.yaml @@ -0,0 +1,129 @@ +apiVersion: 
networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress-with-snippets + annotations: + nginx.org/bad: "alias; " +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress-with-snippets + annotations: + nginx.org/bad: " root ;" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: etc-passwd + annotations: + nginx.org/bad: "/etc/passwd" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: var-run-secrets + annotations: + nginx.org/bad: "/var/run/secrets" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: lua + annotations: + nginx.org/bad: "*! 
_by_lua 8010-191091" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-annotations/.chainsaw-test/ig-good.yaml b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/ig-good.yaml new file mode 100644 index 000000000..1f0b3a8ec --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/ig-good.yaml @@ -0,0 +1,49 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress-with-snippets + annotations: + nginx.org/good: "value" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cafe-ingress +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-annotations/.chainsaw-test/policy-ready.yaml b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..3e2289190 --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-annotations +status: + ready: true \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-annotations/.kyverno-test/kyverno-test.yaml b/nginx-ingress-cel/restrict-annotations/.kyverno-test/kyverno-test.yaml new file mode 100644 index 
000000000..231768b29 --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,26 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: restrict-annotations +policies: +- ../restrict-annotations.yaml +resources: +- resources.yaml +results: +- kind: Ingress + policy: restrict-annotations + resources: + - alias + - root + - etc-passwd + - var-run-secrets + - lua + result: fail + rule: check-ingress +- kind: Ingress + policy: restrict-annotations + resources: + - no-annotations + - good-annotations + result: pass + rule: check-ingress diff --git a/nginx-ingress-cel/restrict-annotations/.kyverno-test/resources.yaml b/nginx-ingress-cel/restrict-annotations/.kyverno-test/resources.yaml new file mode 100644 index 000000000..ed12c4972 --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/.kyverno-test/resources.yaml @@ -0,0 +1,180 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: no-annotations +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: good-annotations + annotations: + nginx.org/good: "value" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: alias + annotations: + nginx.org/bad: "alias; " +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 
+--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: root + annotations: + nginx.org/bad: " root ;" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: etc-passwd + annotations: + nginx.org/bad: "/etc/passwd" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: var-run-secrets + annotations: + nginx.org/bad: "/var/run/secrets" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: lua + annotations: + nginx.org/bad: "*! 
_by_lua 8010-191091" +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-annotations/artifacthub-pkg.yml b/nginx-ingress-cel/restrict-annotations/artifacthub-pkg.yml new file mode 100644 index 000000000..f56be90e7 --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: restrict-annotations-cel +version: 1.0.0 +displayName: Restrict NGINX Ingress annotation values in CEL expressions +description: >- + This policy mitigates CVE-2021-25746 by restricting `metadata.annotations` to safe values. See: https://github.com/kubernetes/ingress-nginx/blame/main/internal/ingress/inspector/rules.go. This issue has been fixed in NGINX Ingress v1.2.0. For NGINX Ingress version 1.0.5+ the "annotation-value-word-blocklist" configuration setting is also recommended. Please refer to the CVE for details. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/nginx-ingress-cel/restrict-annotations/restrict-annotations.yaml + ``` +keywords: + - kyverno + - Security + - NGINX Ingress + - CEL Expressions +readme: | + This policy mitigates CVE-2021-25746 by restricting `metadata.annotations` to safe values. See: https://github.com/kubernetes/ingress-nginx/blame/main/internal/ingress/inspector/rules.go. This issue has been fixed in NGINX Ingress v1.2.0. For NGINX Ingress version 1.0.5+ the "annotation-value-word-blocklist" configuration setting is also recommended. Please refer to the CVE for details. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Security, NGINX Ingress in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Ingress" +digest: 1d65f2c381b323065215afcbc0c0dfaf42b5a3485c4b90ad8dd5035a6f331914 +createdAt: "2024-05-22T06:47:38Z" diff --git a/nginx-ingress-cel/restrict-annotations/restrict-annotations.yaml b/nginx-ingress-cel/restrict-annotations/restrict-annotations.yaml new file mode 100644 index 000000000..cf61a4ac9 --- /dev/null +++ b/nginx-ingress-cel/restrict-annotations/restrict-annotations.yaml @@ -0,0 +1,44 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-annotations + annotations: + policies.kyverno.io/title: Restrict NGINX Ingress annotation values in CEL expressions + policies.kyverno.io/category: Security, NGINX Ingress in CEL + policies.kyverno.io/severity: high + policies.kyverno.io/subject: Ingress + policies.kyverno.io/minversion: "1.11.0" + kyverno.io/kyverno-version: "1.11.0" + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + This policy mitigates CVE-2021-25746 by restricting `metadata.annotations` to safe values. + See: https://github.com/kubernetes/ingress-nginx/blame/main/internal/ingress/inspector/rules.go. + This issue has been fixed in NGINX Ingress v1.2.0. For NGINX Ingress version 1.0.5+ the + "annotation-value-word-blocklist" configuration setting is also recommended. + Please refer to the CVE for details. 
+spec: + validationFailureAction: Enforce + rules: + - name: check-ingress + match: + any: + - resources: + kinds: + - networking.k8s.io/v1/Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.metadata.annotations) || + ( + !object.metadata.annotations.exists(annotation, object.metadata.annotations[annotation].matches('\\s*alias\\s*.*;')) && + !object.metadata.annotations.exists(annotation, object.metadata.annotations[annotation].matches('\\s*root\\s*.*;')) && + !object.metadata.annotations.exists(annotation, object.metadata.annotations[annotation].matches('/etc/(passwd|shadow|group|nginx|ingress-controller)')) && + !object.metadata.annotations.exists(annotation, object.metadata.annotations[annotation].matches('/var/run/secrets')) && + !object.metadata.annotations.exists(annotation, object.metadata.annotations[annotation].matches('.*_by_lua.*')) + ) + message: "spec.rules[].http.paths[].path value is not allowed" + diff --git a/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/chainsaw-test.yaml b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..1a716aa8c --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-ingress-paths +spec: + steps: + - name: step-01 + try: + - apply: + file: ../restrict-ingress-paths.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ig-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ig-bad.yaml diff --git a/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/ig-bad.yaml b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/ig-bad.yaml new file mode 100644 
index 000000000..b3874294d --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/ig-bad.yaml @@ -0,0 +1,85 @@ + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-root +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /root + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-secrets +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /var/run/secrets + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-etc-kubernetes +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /etc/kubernetes/admin.conf + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-serviceaccount +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /var/run/kubernetes/serviceaccount + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-etc +spec: + rules: + - host: example.com + http: + paths: + - path: /etc/config + pathType: Prefix + backend: + service: + name: nginx-service + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/ig-good.yaml b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/ig-good.yaml new file mode 100644 index 000000000..a0d35da2b --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/ig-good.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: good-paths +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - 
path: /coffee + pathType: Prefix + backend: + service: + name: coffee-svc + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/policy-ready.yaml b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..d172b2aef --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-ingress-paths +status: + ready: true \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-ingress-paths/.kyverno-test/kyverno-test.yaml b/nginx-ingress-cel/restrict-ingress-paths/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..bde8a7d90 --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,25 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: restrict-ingress-paths +policies: +- ../restrict-ingress-paths.yaml +resources: +- resources.yaml +results: +- kind: Ingress + policy: restrict-ingress-paths + resources: + - bad-path-root + - bad-path-etc + - bad-path-etc-kubernetes + - bad-path-serviceaccount + - bad-path-secrets + result: fail + rule: check-paths +- kind: Ingress + policy: restrict-ingress-paths + resources: + - good-paths + result: pass + rule: check-paths diff --git a/nginx-ingress-cel/restrict-ingress-paths/.kyverno-test/resources.yaml b/nginx-ingress-cel/restrict-ingress-paths/.kyverno-test/resources.yaml new file mode 100644 index 000000000..f413946e8 --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/.kyverno-test/resources.yaml @@ -0,0 +1,109 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: good-paths +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /tea + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 + - path: /coffee + pathType: Prefix + backend: + 
service: + name: coffee-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-root +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /root + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-secrets +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /var/run/secrets + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-etc-kubernetes +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /etc/kubernetes/admin.conf + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-serviceaccount +spec: + rules: + - host: cafe.example.com + http: + paths: + - path: /var/run/kubernetes/serviceaccount + pathType: Prefix + backend: + service: + name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-etc +spec: + rules: + - host: example.com + http: + paths: + - path: /etc/config + pathType: Prefix + backend: + service: + name: nginx-service + port: + number: 80 \ No newline at end of file diff --git a/nginx-ingress-cel/restrict-ingress-paths/artifacthub-pkg.yml b/nginx-ingress-cel/restrict-ingress-paths/artifacthub-pkg.yml new file mode 100644 index 000000000..6dc7c651c --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: restrict-ingress-paths-cel +version: 1.0.0 +displayName: Restrict NGINX Ingress path values in CEL expressions +description: >- + This policy mitigates CVE-2021-25745 by restricting `spec.rules[].http.paths[].path` to safe values. Additional paths can be added as required. This issue has been fixed in NGINX Ingress v1.2.0. 
Please refer to the CVE for details. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/nginx-ingress-cel/restrict-ingress-paths/restrict-ingress-paths.yaml + ``` +keywords: + - kyverno + - Security + - NGINX Ingress + - CEL Expressions +readme: | + This policy mitigates CVE-2021-25745 by restricting `spec.rules[].http.paths[].path` to safe values. Additional paths can be added as required. This issue has been fixed in NGINX Ingress v1.2.0. Please refer to the CVE for details. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Security, NGINX Ingress in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Ingress" +digest: 1c95fe0afc73a2e5e30376d7594d98b4e58cfd21378e3ea10035742eb960220f +createdAt: "2024-05-22T07:13:08Z" diff --git a/nginx-ingress-cel/restrict-ingress-paths/restrict-ingress-paths.yaml b/nginx-ingress-cel/restrict-ingress-paths/restrict-ingress-paths.yaml new file mode 100644 index 000000000..efabf6062 --- /dev/null +++ b/nginx-ingress-cel/restrict-ingress-paths/restrict-ingress-paths.yaml @@ -0,0 +1,40 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-ingress-paths + annotations: + policies.kyverno.io/title: Restrict NGINX Ingress path values in CEL expressions + policies.kyverno.io/category: Security, NGINX Ingress in CEL + policies.kyverno.io/severity: high + policies.kyverno.io/subject: Ingress + policies.kyverno.io/minversion: "1.11.0" + kyverno.io/kyverno-version: "1.11.0" + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + This policy mitigates CVE-2021-25745 by restricting `spec.rules[].http.paths[].path` to safe values. + Additional paths can be added as required. This issue has been fixed in NGINX Ingress v1.2.0. + Please refer to the CVE for details. 
+spec: + validationFailureAction: Enforce + rules: + - name: check-paths + match: + any: + - resources: + kinds: + - networking.k8s.io/v1/Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.rules) || + object.spec.rules.all(rule, !has(rule.http) || !has(rule.http.paths) || + rule.http.paths.all(p, + !p.path.contains('/etc') && !p.path.contains('/var/run/secrets') && + !p.path.contains('/root') && !p.path.contains('/var/run/kubernetes/serviceaccount') && + !p.path.contains('/etc/kubernetes/admin.conf'))) + message: "spec.rules[].http.paths[].path value is not allowed" + diff --git a/nginx-ingress/restrict-ingress-paths/.chainsaw-test/ig-bad.yaml b/nginx-ingress/restrict-ingress-paths/.chainsaw-test/ig-bad.yaml index b3d09bddb..b3874294d 100644 --- a/nginx-ingress/restrict-ingress-paths/.chainsaw-test/ig-bad.yaml +++ b/nginx-ingress/restrict-ingress-paths/.chainsaw-test/ig-bad.yaml @@ -36,7 +36,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: bad-path-etc + name: bad-path-etc-kubernetes spec: rules: - host: cafe.example.com @@ -64,5 +64,22 @@ spec: backend: service: name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-etc +spec: + rules: + - host: example.com + http: + paths: + - path: /etc/config + pathType: Prefix + backend: + service: + name: nginx-service port: number: 80 \ No newline at end of file diff --git a/nginx-ingress/restrict-ingress-paths/.kyverno-test/kyverno-test.yaml b/nginx-ingress/restrict-ingress-paths/.kyverno-test/kyverno-test.yaml index c57aa2a7e..bde8a7d90 100644 --- a/nginx-ingress/restrict-ingress-paths/.kyverno-test/kyverno-test.yaml +++ b/nginx-ingress/restrict-ingress-paths/.kyverno-test/kyverno-test.yaml @@ -12,6 +12,7 @@ results: resources: - bad-path-root - bad-path-etc + - bad-path-etc-kubernetes - bad-path-serviceaccount - bad-path-secrets result: fail diff --git 
a/nginx-ingress/restrict-ingress-paths/.kyverno-test/resources.yaml b/nginx-ingress/restrict-ingress-paths/.kyverno-test/resources.yaml index 849b672e9..f413946e8 100644 --- a/nginx-ingress/restrict-ingress-paths/.kyverno-test/resources.yaml +++ b/nginx-ingress/restrict-ingress-paths/.kyverno-test/resources.yaml @@ -60,7 +60,7 @@ spec: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: bad-path-etc + name: bad-path-etc-kubernetes spec: rules: - host: cafe.example.com @@ -88,5 +88,22 @@ spec: backend: service: name: tea-svc + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bad-path-etc +spec: + rules: + - host: example.com + http: + paths: + - path: /etc/config + pathType: Prefix + backend: + service: + name: nginx-service port: number: 80 \ No newline at end of file diff --git a/openshift-cel/check-routes/.chainsaw-test/chainsaw-test.yaml b/openshift-cel/check-routes/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..14b19b50c --- /dev/null +++ b/openshift-cel/check-routes/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: check-routes +spec: + steps: + - name: step-01 + try: + - apply: + file: ../check-routes.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: route-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: route-bad.yaml diff --git a/openshift-cel/check-routes/.chainsaw-test/policy-ready.yaml b/openshift-cel/check-routes/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..7620a92af --- /dev/null +++ b/openshift-cel/check-routes/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: check-routes +status: + ready: 
true \ No newline at end of file diff --git a/openshift-cel/check-routes/.chainsaw-test/route-bad.yaml b/openshift-cel/check-routes/.chainsaw-test/route-bad.yaml new file mode 100644 index 000000000..9411e209e --- /dev/null +++ b/openshift-cel/check-routes/.chainsaw-test/route-bad.yaml @@ -0,0 +1,12 @@ +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: hello-openshift-http +spec: + host: hello-openshift-hello-openshift.mydomain + port: + targetPort: 8080 + to: + kind: Service + name: hello-openshift +--- \ No newline at end of file diff --git a/openshift-cel/check-routes/.chainsaw-test/route-good.yaml b/openshift-cel/check-routes/.chainsaw-test/route-good.yaml new file mode 100644 index 000000000..c9ee97efe --- /dev/null +++ b/openshift-cel/check-routes/.chainsaw-test/route-good.yaml @@ -0,0 +1,66 @@ +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: frontend +spec: + host: www.example.com + to: + kind: Service + name: frontend + tls: + termination: reencrypt + key: |- + -----BEGIN PRIVATE KEY----- + [...] + -----END PRIVATE KEY----- + certificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- + caCertificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- + destinationCACertificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: frontend-edge +spec: + host: www.example.com + to: + kind: Service + name: frontend + tls: + termination: edge + key: |- + -----BEGIN PRIVATE KEY----- + [...] + -----END PRIVATE KEY----- + certificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- + caCertificate: |- + -----BEGIN CERTIFICATE----- + [...] 
+ -----END CERTIFICATE----- +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: route-passthrough-secured +spec: + host: www.example.com + port: + targetPort: 8080 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + to: + kind: Service + name: frontend diff --git a/openshift-cel/check-routes/.kyverno-test/kyverno-test.yaml b/openshift-cel/check-routes/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..caf9d781b --- /dev/null +++ b/openshift-cel/check-routes/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: check-routes +policies: +- ../check-routes.yaml +resources: +- resources.yaml +results: +- kind: Route + policy: check-routes + resources: + - hello-openshift-http + result: fail + rule: require-tls-routes +- kind: Route + policy: check-routes + resources: + - frontend + - frontend-edge + - route-passthrough-secured + result: pass + rule: require-tls-routes diff --git a/openshift-cel/check-routes/.kyverno-test/resources.yaml b/openshift-cel/check-routes/.kyverno-test/resources.yaml new file mode 100644 index 000000000..dd21c42b5 --- /dev/null +++ b/openshift-cel/check-routes/.kyverno-test/resources.yaml @@ -0,0 +1,78 @@ +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: hello-openshift-http +spec: + host: hello-openshift-hello-openshift.mydomain + port: + targetPort: 8080 + to: + kind: Service + name: hello-openshift +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: frontend +spec: + host: www.example.com + to: + kind: Service + name: frontend + tls: + termination: reencrypt + key: |- + -----BEGIN PRIVATE KEY----- + [...] + -----END PRIVATE KEY----- + certificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- + caCertificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- + destinationCACertificate: |- + -----BEGIN CERTIFICATE----- + [...] 
+ -----END CERTIFICATE----- +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: frontend-edge +spec: + host: www.example.com + to: + kind: Service + name: frontend + tls: + termination: edge + key: |- + -----BEGIN PRIVATE KEY----- + [...] + -----END PRIVATE KEY----- + certificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- + caCertificate: |- + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: route-passthrough-secured +spec: + host: www.example.com + port: + targetPort: 8080 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + to: + kind: Service + name: frontend diff --git a/openshift-cel/check-routes/artifacthub-pkg.yml b/openshift-cel/check-routes/artifacthub-pkg.yml new file mode 100644 index 000000000..2eb85d6da --- /dev/null +++ b/openshift-cel/check-routes/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: check-routes-cel +version: 1.0.0 +displayName: Require TLS routes in OpenShift in CEL expressions +description: >- + HTTP traffic is not encrypted and hence insecure. This policy prevents configuration of OpenShift HTTP routes. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/openshift-cel/check-routes/check-routes.yaml + ``` +keywords: + - kyverno + - OpenShift + - CEL Expressions +readme: | + HTTP traffic is not encrypted and hence insecure. This policy prevents configuration of OpenShift HTTP routes. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "OpenShift in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Route" +digest: ac5ffb379d599adbf5ec0f2b08c76539b696645f6dee7a05f4b9a75b939243f8 +createdAt: "2024-05-22T07:21:10Z" diff --git a/openshift-cel/check-routes/check-routes.yaml b/openshift-cel/check-routes/check-routes.yaml new file mode 100644 index 000000000..7ec6e0d43 --- /dev/null +++ b/openshift-cel/check-routes/check-routes.yaml @@ -0,0 +1,34 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: check-routes + annotations: + policies.kyverno.io/title: Require TLS routes in OpenShift in CEL expressions + policies.kyverno.io/category: OpenShift in CEL expressions + policies.kyverno.io/severity: high + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Route + policies.kyverno.io/description: |- + HTTP traffic is not encrypted and hence insecure. This policy prevents configuration of OpenShift HTTP routes. +spec: + validationFailureAction: Enforce + background: true + rules: + - name: require-tls-routes + match: + any: + - resources: + kinds: + - route.openshift.io/v1/Route + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.tls)" + message: >- + HTTP routes are not allowed. Configure TLS for secure routes. 
+ diff --git a/openshift-cel/disallow-deprecated-apis/.kyverno-test/kyverno-test.yaml b/openshift-cel/disallow-deprecated-apis/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..8d17d1927 --- /dev/null +++ b/openshift-cel/disallow-deprecated-apis/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,33 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: disallow-deprecated-apis +policies: +- ../disallow-deprecated-apis.yaml +resources: +- resources.yaml +results: +- kind: ClusterRole + policy: disallow-deprecated-apis + resources: + - openshift-cluster-role-deprecated + result: fail + rule: check-deprecated-apis +- kind: ClusterRoleBinding + policy: disallow-deprecated-apis + resources: + - openshift-cluster-role-binding-deprecated + result: fail + rule: check-deprecated-apis +- kind: Role + policy: disallow-deprecated-apis + resources: + - openshift-role-deprecated + result: fail + rule: check-deprecated-apis +- kind: RoleBinding + policy: disallow-deprecated-apis + resources: + - openshift-role-binding-deprecated + result: fail + rule: check-deprecated-apis diff --git a/openshift-cel/disallow-deprecated-apis/.kyverno-test/resources.yaml b/openshift-cel/disallow-deprecated-apis/.kyverno-test/resources.yaml new file mode 100644 index 000000000..5f37e352a --- /dev/null +++ b/openshift-cel/disallow-deprecated-apis/.kyverno-test/resources.yaml @@ -0,0 +1,89 @@ +apiVersion: authorization.openshift.io/v1 +kind: ClusterRole +metadata: + name: openshift-cluster-role-deprecated +spec: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +--- +apiVersion: authorization.openshift.io/v1 +kind: ClusterRoleBinding +metadata: + name: openshift-cluster-role-binding-deprecated +subjects: +- kind: User + name: jane # "name" is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: openshift-cluster-role-deprecated + apiGroup: authorization.openshift.io/v1 +--- +apiVersion: authorization.openshift.io/v1 +kind: Role
+metadata: + name: openshift-role-deprecated +spec: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +--- +apiVersion: authorization.openshift.io/v1 +kind: RoleBinding +metadata: + name: openshift-role-binding-deprecated + namespace: default +subjects: +- kind: User + name: jane # "name" is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: openshift-role-deprecated + apiGroup: authorization.openshift.io/v1 +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: openshift-cluster-role-valid +spec: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: openshift-cluster-role-binding +subjects: +- kind: User + name: jane # "name" is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: openshift-cluster-role + apiGroup: rbac.authorization.k8s.io/v1 +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: openshift-role +spec: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: openshift-role-binding + namespace: default +subjects: +- kind: User + name: jane # "name" is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: openshift-role + apiGroup: rbac.authorization.k8s.io/v1 diff --git a/openshift-cel/disallow-deprecated-apis/artifacthub-pkg.yml b/openshift-cel/disallow-deprecated-apis/artifacthub-pkg.yml new file mode 100644 index 000000000..7cf0c78f2 --- /dev/null +++ b/openshift-cel/disallow-deprecated-apis/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: disallow-deprecated-apis-cel +version: 1.0.0 +displayName: Disallow deprecated APIs in CEL expressions +description: >- + OpenShift APIs are sometimes deprecated and removed after a few releases. 
As a best practice, older API versions should be replaced with newer versions. This policy validates for APIs that are deprecated or scheduled for removal. Note that checking for some of these resources may require modifying the Kyverno ConfigMap to remove filters. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/openshift-cel/disallow-deprecated-apis/disallow-deprecated-apis.yaml + ``` +keywords: + - kyverno + - OpenShift + - CEL Expressions +readme: | + OpenShift APIs are sometimes deprecated and removed after a few releases. As a best practice, older API versions should be replaced with newer versions. This policy validates for APIs that are deprecated or scheduled for removal. Note that checking for some of these resources may require modifying the Kyverno ConfigMap to remove filters. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "OpenShift in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "ClusterRole,ClusterRoleBinding,Role,RoleBinding,RBAC" +digest: 8ca91e6472908c67621b07b800023ff5e93383dbd9fd2d0f90879506cec45dd7 +createdAt: "2024-05-22T07:36:55Z" diff --git a/openshift-cel/disallow-deprecated-apis/disallow-deprecated-apis.yaml b/openshift-cel/disallow-deprecated-apis/disallow-deprecated-apis.yaml new file mode 100644 index 000000000..984f03178 --- /dev/null +++ b/openshift-cel/disallow-deprecated-apis/disallow-deprecated-apis.yaml @@ -0,0 +1,41 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-deprecated-apis + annotations: + policies.kyverno.io/title: Disallow deprecated APIs in CEL expressions + policies.kyverno.io/category: OpenShift in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: 
ClusterRole,ClusterRoleBinding,Role,RoleBinding,RBAC + policies.kyverno.io/description: >- + OpenShift APIs are sometimes deprecated and removed after a few releases. + As a best practice, older API versions should be replaced with newer versions. + This policy validates for APIs that are deprecated or scheduled for removal. + Note that checking for some of these resources may require modifying the Kyverno + ConfigMap to remove filters. +spec: + validationFailureAction: Enforce + background: true + rules: + - name: check-deprecated-apis + match: + any: + - resources: + kinds: + - authorization.openshift.io/v1/ClusterRole + - authorization.openshift.io/v1/ClusterRoleBinding + - authorization.openshift.io/v1/Role + - authorization.openshift.io/v1/RoleBinding + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "false" + messageExpression: >- + object.apiVersion + '/' + object.kind + ' is deprecated.' + diff --git a/openshift-cel/disallow-jenkins-pipeline-strategy/.kyverno-test/kyverno-test.yaml b/openshift-cel/disallow-jenkins-pipeline-strategy/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..4aadc467a --- /dev/null +++ b/openshift-cel/disallow-jenkins-pipeline-strategy/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: disallow-jenkins-pipeline-strategy +policies: +- ../disallow-jenkins-pipeline-strategy.yaml +resources: +- resources.yaml +results: +- kind: BuildConfig + policy: disallow-jenkins-pipeline-strategy + resources: + - sample-jenkins-pipeline + - sample-jenkins-pipeline-new + result: fail + rule: check-build-strategy +- kind: BuildConfig + policy: disallow-jenkins-pipeline-strategy + resources: + - sample-pipeline-no-jenkins + - sample-pipeline-no-jenkins-new + result: pass + rule: check-build-strategy diff --git a/openshift-cel/disallow-jenkins-pipeline-strategy/.kyverno-test/resources.yaml
b/openshift-cel/disallow-jenkins-pipeline-strategy/.kyverno-test/resources.yaml new file mode 100644 index 000000000..335e2e20d --- /dev/null +++ b/openshift-cel/disallow-jenkins-pipeline-strategy/.kyverno-test/resources.yaml @@ -0,0 +1,43 @@ +kind: "BuildConfig" +apiVersion: "v1" +metadata: + name: "sample-jenkins-pipeline" +spec: + source: + git: + uri: "https://github.com/openshift/ruby-hello-world" + strategy: + jenkinsPipelineStrategy: + jenkinsfilePath: some/repo/dir/filename +--- +kind: "BuildConfig" +apiVersion: "v1" +metadata: + name: "sample-pipeline-no-jenkins" +spec: + source: + git: + uri: "https://github.com/openshift/ruby-hello-world" + strategy: {} +--- +kind: "BuildConfig" +apiVersion: "build.openshift.io/v1" +metadata: + name: "sample-jenkins-pipeline-new" +spec: + source: + git: + uri: "https://github.com/openshift/ruby-hello-world" + strategy: + jenkinsPipelineStrategy: + jenkinsfilePath: some/repo/dir/filename +--- +kind: "BuildConfig" +apiVersion: "build.openshift.io/v1" +metadata: + name: "sample-pipeline-no-jenkins-new" +spec: + source: + git: + uri: "https://github.com/openshift/ruby-hello-world" + strategy: {} diff --git a/openshift-cel/disallow-jenkins-pipeline-strategy/artifacthub-pkg.yml b/openshift-cel/disallow-jenkins-pipeline-strategy/artifacthub-pkg.yml new file mode 100644 index 000000000..10065d31e --- /dev/null +++ b/openshift-cel/disallow-jenkins-pipeline-strategy/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: disallow-jenkins-pipeline-strategy-cel +version: 1.0.0 +displayName: Disallow OpenShift Jenkins Pipeline Build Strategy in CEL expressions +description: >- + The Jenkins Pipeline Build Strategy has been deprecated. This policy prevents its use. Use OpenShift Pipelines instead. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/openshift-cel/disallow-jenkins-pipeline-strategy/disallow-jenkins-pipeline-strategy.yaml + ``` +keywords: + - kyverno + - OpenShift + - CEL Expressions +readme: | + The Jenkins Pipeline Build Strategy has been deprecated. This policy prevents its use. Use OpenShift Pipelines instead. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "OpenShift in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "BuildConfig" +digest: f38131a30e03d633c0745ae181e83c75ae94a180d1e0402a8cba26ccf8088f81 +createdAt: "2024-05-22T09:40:45Z" diff --git a/openshift-cel/disallow-jenkins-pipeline-strategy/disallow-jenkins-pipeline-strategy.yaml b/openshift-cel/disallow-jenkins-pipeline-strategy/disallow-jenkins-pipeline-strategy.yaml new file mode 100644 index 000000000..2857aa488 --- /dev/null +++ b/openshift-cel/disallow-jenkins-pipeline-strategy/disallow-jenkins-pipeline-strategy.yaml @@ -0,0 +1,35 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-jenkins-pipeline-strategy + annotations: + policies.kyverno.io/title: Disallow OpenShift Jenkins Pipeline Build Strategy in CEL expressions + policies.kyverno.io/category: OpenShift in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: BuildConfig + policies.kyverno.io/description: >- + The Jenkins Pipeline Build Strategy has been deprecated. This policy prevents its use. Use OpenShift Pipelines instead. 
+spec: + validationFailureAction: Enforce + background: true + rules: + - name: check-build-strategy + match: + any: + - resources: + kinds: + - v1/BuildConfig + - build.openshift.io/v1/BuildConfig + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.spec.strategy.jenkinsPipelineStrategy)" + message: >- + Jenkins Pipeline Build Strategy has been deprecated and is not allowed + diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/chainsaw-test.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..2b8304268 --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,44 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: disallow-security-context-constraint-anyuid +spec: + steps: + - name: step-01 + try: + - apply: + file: ../disallow-security-context-constraint-anyuid.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: roles-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: roles-bad.yaml + - apply: + file: clusterroles-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: clusterroles-bad.yaml + - apply: + file: rb-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: rb-bad.yaml + - apply: + file: crb-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: crb-bad.yaml diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/clusterroles-bad.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/clusterroles-bad.yaml new file mode 100644 index 000000000..8c5bdbd84 --- /dev/null +++ 
b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/clusterroles-bad.yaml @@ -0,0 +1,34 @@ + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: crole-bad01 +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: crole-bad02 +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - "*" diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/clusterroles-good.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/clusterroles-good.yaml new file mode 100644 index 000000000..e03d3c81d --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/clusterroles-good.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: crole-good01 +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] \ No newline at end of file diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/crb-bad.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/crb-bad.yaml new file mode 100644 index 000000000..a3ccd78f9 --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/crb-bad.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: clusterrolebinding-bad01 +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:openshift:scc:anyuid + apiGroup: rbac.authorization.k8s.io \ No newline at 
end of file diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/crb-good.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/crb-good.yaml new file mode 100644 index 000000000..b49a62cbf --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/crb-good.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: clusterrolebinding-good01 +subjects: +- kind: Group + name: manager + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: view + apiGroup: rbac.authorization.k8s.io diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/policy-ready.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..ed6d92cb9 --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-security-context-constraint-anyuid +status: + ready: true \ No newline at end of file diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/rb-bad.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/rb-bad.yaml new file mode 100644 index 000000000..378129584 --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/rb-bad.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rolebinding-bad01 +subjects: +- kind: User + name: dave + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:openshift:scc:anyuid + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/rb-good.yaml 
b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/rb-good.yaml new file mode 100644 index 000000000..811d5d7c2 --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/rb-good.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rolebinding-good01 +subjects: +- kind: User + name: dave + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: view + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/roles-bad.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/roles-bad.yaml new file mode 100644 index 000000000..984b8cf8b --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/roles-bad.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-role-bad01 +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: default + name: pod-role-bad02 +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - "*" +--- \ No newline at end of file diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/roles-good.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/roles-good.yaml new file mode 100644 index 000000000..34c8d7a54 --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.chainsaw-test/roles-good.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + 
name: good-role01 +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] \ No newline at end of file diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.kyverno-test/kyverno-test.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..36c55e3bf --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,59 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: disallow-security-context-constraint-anyuid +policies: +- ../disallow-security-context-constraint-anyuid.yaml +resources: +- resources.yaml +results: +- kind: ClusterRole + policy: disallow-security-context-constraint-anyuid + resources: + - secret-reader-anyuid-use + - secret-reader-anyuid-all + result: fail + rule: check-security-context-constraint +- kind: Role + policy: disallow-security-context-constraint-anyuid + resources: + - pod-role-anyuid-use + - pod-role-anyuid-all + result: fail + rule: check-security-context-constraint +- kind: ClusterRole + policy: disallow-security-context-constraint-anyuid + resources: + - secret-reader + result: pass + rule: check-security-context-constraint +- kind: Role + policy: disallow-security-context-constraint-anyuid + resources: + - pod-role + result: pass + rule: check-security-context-constraint +- kind: ClusterRoleBinding + policy: disallow-security-context-constraint-anyuid + resources: + - clusterrolebinding-anyuid + result: fail + rule: check-security-context-roleref +- kind: RoleBinding + policy: disallow-security-context-constraint-anyuid + resources: + - rolebinding-anyuid + result: fail + rule: check-security-context-roleref +- kind: ClusterRoleBinding + policy: disallow-security-context-constraint-anyuid + resources: + - clusterrolebinding-test + result: pass + rule: check-security-context-roleref +- kind: RoleBinding + policy: disallow-security-context-constraint-anyuid + resources:
- rolebinding-test + result: pass + rule: check-security-context-roleref diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/.kyverno-test/resources.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/.kyverno-test/resources.yaml new file mode 100644 index 000000000..0ce3f58be --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/.kyverno-test/resources.yaml @@ -0,0 +1,154 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: default + name: pod-role-anyuid-use +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["pods"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: default + name: pod-role +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["pods"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: default + name: pod-role-anyuid-all +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["pods"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + # "namespace" omitted since ClusterRoles are not namespaced + name: secret-reader-anyuid-use +rules: +- apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Secret + # objects is "secrets" + resources: ["secrets"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + # "namespace" omitted since ClusterRoles are not 
namespaced + name: secret-reader +rules: +- apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Secret + # objects is "secrets" + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + # "namespace" omitted since ClusterRoles are not namespaced + name: secret-reader-anyuid-all +rules: +- apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Secret + # objects is "secrets" + resources: ["secrets"] + verbs: ["get", "watch", "list"] +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + resources: + - securitycontextconstraints + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rolebinding-anyuid + namespace: development +subjects: +- kind: User + name: dave # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:openshift:scc:anyuid + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: clusterrolebinding-anyuid +subjects: +- kind: Group + name: manager # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:openshift:scc:anyuid + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rolebinding-test + namespace: development +subjects: +- kind: User + name: dave # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: view + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: clusterrolebinding-test +subjects: +- kind: Group + name: manager # Name is case sensitive + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: view + apiGroup: rbac.authorization.k8s.io diff --git 
a/openshift-cel/disallow-security-context-constraint-anyuid/artifacthub-pkg.yml b/openshift-cel/disallow-security-context-constraint-anyuid/artifacthub-pkg.yml new file mode 100644 index 000000000..69839fe57 --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: disallow-security-context-constraint-anyuid-cel +version: 1.0.0 +displayName: Disallow use of the SecurityContextConstraint (SCC) anyuid in CEL expressions +description: >- + Disallow the use of the SecurityContextConstraint (SCC) anyuid which allows a pod to run with the UID as declared in the image instead of a random UID +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/openshift-cel/disallow-security-context-constraint-anyuid/disallow-security-context-constraint-anyuid.yaml + ``` +keywords: + - kyverno + - Security + - CEL Expressions +readme: | + Disallow the use of the SecurityContextConstraint (SCC) anyuid which allows a pod to run with the UID as declared in the image instead of a random UID + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Security in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Role,ClusterRole,RBAC" +digest: a12e5cbb7ee88722774bf06d5c086804b4e3151811088be926470b12b8920cf0 +createdAt: "2024-05-22T09:53:47Z" diff --git a/openshift-cel/disallow-security-context-constraint-anyuid/disallow-security-context-constraint-anyuid.yaml b/openshift-cel/disallow-security-context-constraint-anyuid/disallow-security-context-constraint-anyuid.yaml new file mode 100644 index 000000000..d37d4c72d --- /dev/null +++ b/openshift-cel/disallow-security-context-constraint-anyuid/disallow-security-context-constraint-anyuid.yaml @@ -0,0 +1,51 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-security-context-constraint-anyuid + 
annotations: + policies.kyverno.io/title: Disallow use of the SecurityContextConstraint (SCC) anyuid in CEL expressions + policies.kyverno.io/category: Security in CEL + policies.kyverno.io/severity: high + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Role,ClusterRole,RBAC + policies.kyverno.io/description: >- + Disallow the use of the SecurityContextConstraint (SCC) anyuid which allows a pod to run with the UID as declared in the image instead of a random UID +spec: + validationFailureAction: Enforce + background: true + rules: + - name: check-security-context-constraint + match: + any: + - resources: + kinds: + - ClusterRole + - Role + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.rules) || !object.rules.exists(rule, 'anyuid' in rule.resourceNames && ('use' in rule.verbs || '*' in rule.verbs))" + message: >- + Use of the SecurityContextConstraint (SCC) anyuid is not allowed + - name: check-security-context-roleref + match: + any: + - resources: + kinds: + - ClusterRoleBinding + - RoleBinding + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.roleRef.name != 'system:openshift:scc:anyuid'" + message: >- + Use of the SecurityContextConstraint (SCC) anyuid is not allowed + diff --git a/openshift-cel/enforce-etcd-encryption/.kyverno-test/kyverno-test.yaml b/openshift-cel/enforce-etcd-encryption/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..2cf4b765d --- /dev/null +++ b/openshift-cel/enforce-etcd-encryption/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: check-policy +policies: +- ../enforce-etcd-encryption.yaml +resources: +- resources.yaml +results: +- kind: APIServer + policy: enforce-etcd-encryption + resources: + - cluster-no-encryption + result: fail + rule: 
check-etcd-encryption +- kind: APIServer + policy: enforce-etcd-encryption + resources: + - cluster-with-encryption + result: pass + rule: check-etcd-encryption diff --git a/openshift-cel/enforce-etcd-encryption/.kyverno-test/resources.yaml b/openshift-cel/enforce-etcd-encryption/.kyverno-test/resources.yaml new file mode 100644 index 000000000..442468ad2 --- /dev/null +++ b/openshift-cel/enforce-etcd-encryption/.kyverno-test/resources.yaml @@ -0,0 +1,16 @@ +apiVersion: config.openshift.io/v1 +kind: APIServer +metadata: + annotations: + release.openshift.io/create-only: "true" + name: cluster-no-encryption +spec: {} +--- +apiVersion: config.openshift.io/v1 +kind: APIServer +metadata: + annotations: + release.openshift.io/create-only: "true" + name: cluster-with-encryption +spec: + encryption: {} diff --git a/openshift-cel/enforce-etcd-encryption/artifacthub-pkg.yml b/openshift-cel/enforce-etcd-encryption/artifacthub-pkg.yml new file mode 100644 index 000000000..a5198a02d --- /dev/null +++ b/openshift-cel/enforce-etcd-encryption/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: enforce-etcd-encryption-cel +version: 1.0.0 +displayName: Enforce etcd encryption in OpenShift in CEL expressions +description: >- + Encryption at rest is a security best practice. This policy ensures encryption is enabled for etcd in OpenShift clusters. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/openshift-cel/enforce-etcd-encryption/enforce-etcd-encryption.yaml + ``` +keywords: + - kyverno + - OpenShift + - CEL Expressions +readme: | + Encryption at rest is a security best practice. This policy ensures encryption is enabled for etcd in OpenShift clusters. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "OpenShift in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "APIServer" +digest: 293113b4abad7fea2e4d805bc63dc23c8fad9658424353644e501ac5b0abd592 +createdAt: "2024-05-22T10:15:24Z" diff --git a/openshift-cel/enforce-etcd-encryption/enforce-etcd-encryption.yaml b/openshift-cel/enforce-etcd-encryption/enforce-etcd-encryption.yaml new file mode 100644 index 000000000..045402624 --- /dev/null +++ b/openshift-cel/enforce-etcd-encryption/enforce-etcd-encryption.yaml @@ -0,0 +1,34 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-etcd-encryption + annotations: + policies.kyverno.io/title: Enforce etcd encryption in OpenShift in CEL expressions + policies.kyverno.io/category: OpenShift + policies.kyverno.io/severity: high + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: APIServer + policies.kyverno.io/description: >- + Encryption at rest is a security best practice. This policy ensures encryption is enabled for etcd in OpenShift clusters. 
+spec: + validationFailureAction: Enforce + background: true + rules: + - name: check-etcd-encryption + match: + any: + - resources: + kinds: + - config.openshift.io/v1/APIServer + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.encryption)" + message: >- + Encryption should be enabled for etcd + diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/chainsaw-test.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..19705bdc2 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,48 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: advanced-restrict-image-registries +spec: + # disable templating because it can cause issues with CEL expressions + template: false + steps: + - name: step-01 + try: + - apply: + file: ../advanced-restrict-image-registries.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: advanced-restrict-image-registries + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ns-01.yaml + - apply: + file: ns-02.yaml + - apply: + file: cm.yaml + - name: step-03 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/cm.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/cm.yaml new file mode 100755 index 000000000..fdad1c734 --- /dev/null +++ 
b/other-cel/advanced-restrict-image-registries/.chainsaw-test/cm.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + registries: ghcr.io/ +kind: ConfigMap +metadata: + name: clusterregistries + namespace: default diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-01.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-01.yaml new file mode 100755 index 000000000..30c99ca14 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-01.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + corp.com/allowed-registries: img.corp.com/ + name: imageregistries-ns01 diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-02.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-02.yaml new file mode 100755 index 000000000..3a301353b --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/ns-02.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + corp.com/allowed-registries: docker.io/ + name: imageregistries-ns02 diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-bad.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..647879b83 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + namespace: imageregistries-ns01 +spec: + initContainers: + - name: busybox01-init + image: busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: corp.img.io/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + namespace: imageregistries-ns02 +spec: + initContainers: + - name: 
busybox01-init + image: corp.img.io/busybox:1.35 + containers: + - name: busybox01 + image: img.corp.com/busybox:1.35 + - name: busybox02 + image: docker.io/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + initContainers: + - name: busybox01-init + image: corp.img.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: ghcr.io/busybox:1.35 +--- \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-good.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..7d9b3714b --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/pod-good.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 + namespace: imageregistries-ns01 +spec: + initContainers: + - name: busybox01-init + image: img.corp.com/busybox:1.35 + # - name: busybox02-init + # image: ghcr.io/busybox:1.35 + containers: + # - name: busybox01 + # image: ghcr.io/busybox:1.35 + - name: busybox02 + image: img.corp.com/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 + namespace: imageregistries-ns02 +spec: + initContainers: + - name: busybox01-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: docker.io/busybox:1.35 + - name: busybox02 + image: docker.io/busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + initContainers: + - name: busybox01-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: ghcr.io/busybox:1.35 \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-bad.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..f2b36e075 --- /dev/null +++ 
b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeploy01 + namespace: imageregistries-ns01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - name: busybox01-init + image: docker.io/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: corp.img.io/busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 + namespace: imageregistries-ns02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - name: busybox01-init + image: docker.io/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: corp.img.io/busybox:1.35 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-good.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..5d4e2d168 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeploy01 + namespace: imageregistries-ns01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - name: busybox01-init + image: img.corp.com/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: img.corp.com/busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: 
goodcronjob01 + namespace: imageregistries-ns02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - name: busybox01-init + image: docker.io/busybox:1.35 + - name: busybox02-init + image: ghcr.io/busybox:1.35 + containers: + - name: busybox01 + image: ghcr.io/busybox:1.35 + - name: busybox02 + image: docker.io/busybox:1.35 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/advanced-restrict-image-registries/.chainsaw-test/policy-ready.yaml b/other-cel/advanced-restrict-image-registries/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..817091e5a --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: advanced-restrict-image-registries +status: + ready: true diff --git a/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml b/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml new file mode 100644 index 000000000..d0471c492 --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml @@ -0,0 +1,54 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: advanced-restrict-image-registries + annotations: + policies.kyverno.io/title: Advanced Restrict Image Registries in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + In instances where a ClusterPolicy defines all the approved image registries + is insufficient, more granular control may be needed to set permitted registries, + especially in multi-tenant use cases where some registries may be based on + the Namespace. 
This policy shows an advanced version of the Restrict Image Registries + policy which gets a global approved registry from a ConfigMap and, based upon an + annotation at the Namespace level, gets the registry approved for that Namespace. +spec: + validationFailureAction: Audit + background: false + rules: + - name: validate-corp-registries + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: clusterregistries + namespace: default + parameterNotFoundAction: Deny + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + - name: nsregistries + expression: >- + (has(namespaceObject.metadata.annotations) && 'corp.com/allowed-registries' in namespaceObject.metadata.annotations) ? + namespaceObject.metadata.annotations['corp.com/allowed-registries'] : ' ' + - name: clusterregistries + expression: "'registries' in params.data ? params.data['registries'] : ' '" + expressions: + - expression: "variables.allContainers.all(container, container.image.startsWith(variables.nsregistries) || container.image.startsWith(variables.clusterregistries))" + message: This Pod names an image that is not from an approved registry. 
+ diff --git a/other-cel/advanced-restrict-image-registries/artifacthub-pkg.yml b/other-cel/advanced-restrict-image-registries/artifacthub-pkg.yml new file mode 100644 index 000000000..edb0ed41a --- /dev/null +++ b/other-cel/advanced-restrict-image-registries/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: advanced-restrict-image-registries-cel +version: 1.0.0 +displayName: Advanced Restrict Image Registries in CEL expressions +description: >- + In instances where a ClusterPolicy defines all the approved image registries is insufficient, more granular control may be needed to set permitted registries, especially in multi-tenant use cases where some registries may be based on the Namespace. This policy shows an advanced version of the Restrict Image Registries policy which gets a global approved registry from a ConfigMap and, based upon an annotation at the Namespace level, gets the registry approved for that Namespace. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + In instances where a ClusterPolicy defines all the approved image registries is insufficient, more granular control may be needed to set permitted registries, especially in multi-tenant use cases where some registries may be based on the Namespace. This policy shows an advanced version of the Restrict Image Registries policy which gets a global approved registry from a ConfigMap and, based upon an annotation at the Namespace level, gets the registry approved for that Namespace. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: ada2e1e0dd2db1d27d973c07375812e415fb1592c9d1ea26a89850c090520ce4 +createdAt: "2024-04-21T11:03:06Z" + diff --git a/other-cel/allowed-annotations/allowed-annotations.yaml b/other-cel/allowed-annotations/allowed-annotations.yaml index 6832fa2d1..cafedc945 100644 --- a/other-cel/allowed-annotations/allowed-annotations.yaml +++ b/other-cel/allowed-annotations/allowed-annotations.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/allowed-annotations/artifacthub-pkg.yml b/other-cel/allowed-annotations/artifacthub-pkg.yml index e549978d1..e5e260c29 100644 --- a/other-cel/allowed-annotations/artifacthub-pkg.yml +++ b/other-cel/allowed-annotations/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod, Annotation" -digest: c917791b2d807cd00117591ba8fa05e7453aa3a8e0c9c1a8d20165ac63150e0c +digest: e18bedf7d0b6b1b6ac5d723071d78a1594c325620a1ebd28dd8798414da786b2 createdAt: "2024-03-17T14:04:46Z" diff --git a/other-cel/allowed-pod-priorities/allowed-pod-priorities.yaml b/other-cel/allowed-pod-priorities/allowed-pod-priorities.yaml index 3f1dd2c03..89ce2ee29 100644 --- a/other-cel/allowed-pod-priorities/allowed-pod-priorities.yaml +++ b/other-cel/allowed-pod-priorities/allowed-pod-priorities.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: paramKind: diff --git a/other-cel/allowed-pod-priorities/artifacthub-pkg.yml b/other-cel/allowed-pod-priorities/artifacthub-pkg.yml index e09855f14..a110f285f 100644 --- a/other-cel/allowed-pod-priorities/artifacthub-pkg.yml +++ 
b/other-cel/allowed-pod-priorities/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: a017b81b233cd26270cd2d5f74724846c44b9782997545805014a585115bf1f2 +digest: fc3abdb001c9cd666cc784d67eb584800a1a5ab357fbf3616dee2c5752e0f805 createdAt: "2024-03-19T17:20:47Z" diff --git a/other-cel/block-ephemeral-containers/artifacthub-pkg.yml b/other-cel/block-ephemeral-containers/artifacthub-pkg.yml index 370efc51c..40fe2c5a3 100644 --- a/other-cel/block-ephemeral-containers/artifacthub-pkg.yml +++ b/other-cel/block-ephemeral-containers/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 13da34209be549d9904eb9142840242db2ae000b1935e8c3c84d23368886fab9 +digest: 9f035b4eb5a4aedeb5c770b03affe6a30a58ee02b79601b2335ead2b0b270f8d createdAt: "2024-03-20T08:34:56Z" diff --git a/other-cel/block-ephemeral-containers/block-ephemeral-containers.yaml b/other-cel/block-ephemeral-containers/block-ephemeral-containers.yaml index a9223784c..57bda33c5 100644 --- a/other-cel/block-ephemeral-containers/block-ephemeral-containers.yaml +++ b/other-cel/block-ephemeral-containers/block-ephemeral-containers.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/check-env-vars/artifacthub-pkg.yml b/other-cel/check-env-vars/artifacthub-pkg.yml index 6fb029914..c38d0ebdb 100644 --- a/other-cel/check-env-vars/artifacthub-pkg.yml +++ b/other-cel/check-env-vars/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 3cf38de6f83c3a51ab01548ea6fc0ae1f69538a5c0ed2f163180eaea1c60e4aa +digest: 4ed73ec1d10a3333d9fd87665880a4645e031de173c08dbda63eecfba2580dbe createdAt: "2024-03-21T13:31:53Z" diff --git 
a/other-cel/check-env-vars/check-env-vars.yaml b/other-cel/check-env-vars/check-env-vars.yaml index b894fe30c..36f980652 100644 --- a/other-cel/check-env-vars/check-env-vars.yaml +++ b/other-cel/check-env-vars/check-env-vars.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/check-node-for-cve-2022-0185/artifacthub-pkg.yml b/other-cel/check-node-for-cve-2022-0185/artifacthub-pkg.yml index 8c045eaa9..1706761b7 100644 --- a/other-cel/check-node-for-cve-2022-0185/artifacthub-pkg.yml +++ b/other-cel/check-node-for-cve-2022-0185/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Node" -digest: c45321cd579c25bc971467d63d146c6ebef7942b94f72069b6d4d97f332f2df3 +digest: d7eef8bbbe1f7e2a624a93520835944c521838364d020c8b14ecd7c52f1d6107 createdAt: "2024-03-21T14:21:00Z" diff --git a/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.yaml b/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.yaml index 41937ed07..8ab32a1f1 100644 --- a/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.yaml +++ b/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Node + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.yaml b/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.yaml index 8c66e11ba..96ef42b02 100644 --- a/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.yaml +++ b/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - ServiceAccount + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/chainsaw-test.yaml 
b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..74dd0c4e8 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: deny-commands-in-exec-probe +spec: + steps: + - name: step-01 + try: + - apply: + file: ../deny-commands-in-exec-probe.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: deny-commands-in-exec-probe + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pods-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pods-bad.yaml + - apply: + file: podcontrollers-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontrollers-bad.yaml diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-bad.yaml new file mode 100644 index 000000000..1526c45a0 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-bad.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - ls + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + 
containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - foo + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - ps + - aus + periodSeconds: 10 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-good.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-good.yaml new file mode 100644 index 000000000..e6ee813e1 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/podcontrollers-good.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - meow + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - meow + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-bad.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-bad.yaml new file mode 100644 index 000000000..1b58ef909 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-bad.yaml @@ -0,0 +1,106 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - 
ls + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - ps + - aux + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - jcmd + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - echo + - jcmd + - echo + - hello + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - echo + - $(jcmd) + - echo + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - "echo bar" + - "echo ls foo" + - "echo bar" + periodSeconds: 10 \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-good.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-good.yaml new file mode 100644 index 000000000..dc0c71226 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/pods-good.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - echo + - meow + periodSeconds: 10 + - image: busybox:1.35 + name: busybox02 + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: 
+ name: goodpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + exec: + command: + - uptime + periodSeconds: 10 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + - image: busybox:1.35 + name: busybox02 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - image: busybox:1.35 + name: busybox + livenessProbe: + grpc: + port: 8888 + periodSeconds: 10 \ No newline at end of file diff --git a/other-cel/deny-commands-in-exec-probe/.chainsaw-test/policy-ready.yaml b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..a95d54494 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: deny-commands-in-exec-probe +status: + ready: true diff --git a/other-cel/deny-commands-in-exec-probe/.kyverno-test/kyverno-test.yaml b/other-cel/deny-commands-in-exec-probe/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..0e9825553 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,30 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: deny-commands-in-exec-probe +policies: +- ../deny-commands-in-exec-probe.yaml +resources: +- resource.yaml +results: +- kind: Pod + policy: deny-commands-in-exec-probe + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail + rule: check-commands +- kind: Pod + policy: deny-commands-in-exec-probe + resources: + - goodpod02 + - goodpod03 + result: pass + rule: check-commands +- kind: Pod + policy: deny-commands-in-exec-probe + resources: + - goodpod01 + result: skip + rule: check-commands diff --git a/other-cel/deny-commands-in-exec-probe/.kyverno-test/resource.yaml b/other-cel/deny-commands-in-exec-probe/.kyverno-test/resource.yaml new file mode 100644 
index 000000000..2e3810eb6 --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/.kyverno-test/resource.yaml @@ -0,0 +1,90 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: container01 + image: czjunkfoo + livenessProbe: + exec: + command: + - /bin/sh + - -c + - jcmd | grep Main +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: container01 + image: czjunkfoo + livenessProbe: + exec: + command: + - /bin/sh + - -c + - cat | ls -l +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: container01 + image: czjunkfoo + livenessProbe: + exec: + command: + - /bin/sh + - -c + - echo ps -aux | grep cala +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: container02 + image: czjunkfoo + - name: container03 + image: czjunkfoo + livenessProbe: + httpGet: + port: 8080 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: container04 + image: czjunkfoo + livenessProbe: + exec: + command: + - /bin/sh + - -c + - echo foo +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: container04 + image: czjunkfoo + livenessProbe: + exec: + command: + - /bin/sh + - -c + - env + - name: container05 + image: czjunkfoo diff --git a/other-cel/deny-commands-in-exec-probe/artifacthub-pkg.yml b/other-cel/deny-commands-in-exec-probe/artifacthub-pkg.yml new file mode 100644 index 000000000..2edc8226f --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/artifacthub-pkg.yml @@ -0,0 +1,25 @@ +name: deny-commands-in-exec-probe-cel +version: 1.0.0 +displayName: Deny Commands in Exec Probe in CEL expressions +description: >- + Developers may feel compelled to use simple shell commands as a workaround to creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged via detection of those commands. 
This policy prevents the use of certain commands `jcmd`, `ps`, or `ls` if found in a Pod's liveness exec probe. + +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Developers may feel compelled to use simple shell commands as a workaround to creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged via detection of those commands. This policy prevents the use of certain commands `jcmd`, `ps`, or `ls` if found in a Pod's liveness exec probe. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: af3cef475e79cc67105ba3a2be80f0692ea3744f14a9ccd3917d8de8d251e5d0 +createdAt: "2024-04-25T18:27:10Z" + diff --git a/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml b/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml new file mode 100644 index 000000000..a9381ee1f --- /dev/null +++ b/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml @@ -0,0 +1,45 @@ +apiVersion: kyverno.io/v2beta1 +kind: ClusterPolicy +metadata: + name: deny-commands-in-exec-probe + annotations: + policies.kyverno.io/title: Deny Commands in Exec Probe in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Developers may feel compelled to use simple shell commands as a workaround to + creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged + via detection of those commands. 
This policy prevents the use of certain commands + `jcmd`, `ps`, or `ls` if found in a Pod's liveness exec probe. +spec: + validationFailureAction: Audit + background: false + rules: + - name: check-commands + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "check-liveness-probes-commands-exist" + expression: >- + object.spec.containers.exists(container, + has(container.livenessProbe) && has(container.livenessProbe.exec) && + size(container.livenessProbe.exec.command) > 0) + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + !has(container.livenessProbe) || !has(container.livenessProbe.exec) || + !container.livenessProbe.exec.command.exists(command, + command.matches('\\bjcmd\\b') || command.matches('\\bps\\b') || command.matches('\\bls\\b'))) + message: Cannot use commands `jcmd`, `ps`, or `ls` in liveness probes. + diff --git a/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.yaml b/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.yaml index 5f5a76657..22453f86f 100644 --- a/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.yaml +++ b/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Secret + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/disallow-all-secrets/artifacthub-pkg.yml b/other-cel/disallow-all-secrets/artifacthub-pkg.yml index 4d98a3cc0..8b637d856 100644 --- a/other-cel/disallow-all-secrets/artifacthub-pkg.yml +++ b/other-cel/disallow-all-secrets/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod, Secret" -digest: 298fbab361ee9e46721a4afb06212ac6689988f87f257709b82624ef5393ebd5 +digest: 
56e5facdefabb17337fca54838bb54025c60d69660091f213ad366ef94f6fd57 createdAt: "2024-03-23T11:14:09Z" diff --git a/other-cel/disallow-all-secrets/disallow-all-secrets.yaml b/other-cel/disallow-all-secrets/disallow-all-secrets.yaml index 944ac4aeb..b868747ef 100644 --- a/other-cel/disallow-all-secrets/disallow-all-secrets.yaml +++ b/other-cel/disallow-all-secrets/disallow-all-secrets.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/disallow-localhost-services/artifacthub-pkg.yml b/other-cel/disallow-localhost-services/artifacthub-pkg.yml index f27b28fb1..c4882b0f1 100644 --- a/other-cel/disallow-localhost-services/artifacthub-pkg.yml +++ b/other-cel/disallow-localhost-services/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Service" -digest: 6987150bedeaf5bafe4c819cc48b6c2660de1a66b007f24807d88d7a0407a3ba +digest: 6e294a594d7f369411b8857bfe573822e69dfe6b001fded547fb6edb2c4b7b6a createdAt: "2024-03-23T12:17:54Z" diff --git a/other-cel/disallow-localhost-services/disallow-localhost-services.yaml b/other-cel/disallow-localhost-services/disallow-localhost-services.yaml index b2cdc2315..247f5d900 100644 --- a/other-cel/disallow-localhost-services/disallow-localhost-services.yaml +++ b/other-cel/disallow-localhost-services/disallow-localhost-services.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Service + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/disallow-secrets-from-env-vars/artifacthub-pkg.yml b/other-cel/disallow-secrets-from-env-vars/artifacthub-pkg.yml index 2da9f7c41..b2bc353df 100644 --- a/other-cel/disallow-secrets-from-env-vars/artifacthub-pkg.yml +++ b/other-cel/disallow-secrets-from-env-vars/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Sample, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" 
kyverno/subject: "Pod, Secret" -digest: 52e12553f5be68f8e155a88f87e81eefeb8008acea66939a570d597afe16184b +digest: 71ff57f46c814a0971e9fb70f065ca0ab2a4308d9f5d56b1a9f8032eef83782b createdAt: "2024-03-24T16:54:45Z" diff --git a/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.yaml b/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.yaml index 0f03a8b7e..fc36431f2 100644 --- a/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.yaml +++ b/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/docker-socket-requires-label/artifacthub-pkg.yml b/other-cel/docker-socket-requires-label/artifacthub-pkg.yml index 0a0e379c8..1579a8d2c 100644 --- a/other-cel/docker-socket-requires-label/artifacthub-pkg.yml +++ b/other-cel/docker-socket-requires-label/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: d577dea5bad5971c21bc1036f97a85c1701a3fdcb2800ee8b4f0708dc2b58101 +digest: b7f8b5251d1670da5514515e7ae4ffde77d8c9cb0d28c0dcaee61ba13adfd035 createdAt: "2024-03-27T12:13:52Z" diff --git a/other-cel/docker-socket-requires-label/docker-socket-requires-label.yaml b/other-cel/docker-socket-requires-label/docker-socket-requires-label.yaml index 138b36336..26f26975e 100644 --- a/other-cel/docker-socket-requires-label/docker-socket-requires-label.yaml +++ b/other-cel/docker-socket-requires-label/docker-socket-requires-label.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/enforce-pod-duration/artifacthub-pkg.yml b/other-cel/enforce-pod-duration/artifacthub-pkg.yml index c2c68faf6..8505baaf9 100644 --- a/other-cel/enforce-pod-duration/artifacthub-pkg.yml +++ 
b/other-cel/enforce-pod-duration/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: b2f1fec7c8b199024c813b1ddb3d52f27f889d082c0c94f4824c499cd6b278bb +digest: 174a456e5d4afd8c8baa9c0c3bdb7da0e09934673f0544d575e5aad6aab5e644 createdAt: "2024-03-30T18:18:11Z" diff --git a/other-cel/enforce-pod-duration/enforce-pod-duration.yaml b/other-cel/enforce-pod-duration/enforce-pod-duration.yaml index 48ccc0fb7..33a01dc2b 100644 --- a/other-cel/enforce-pod-duration/enforce-pod-duration.yaml +++ b/other-cel/enforce-pod-duration/enforce-pod-duration.yaml @@ -22,6 +22,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/enforce-readwriteonce-pod/artifacthub-pkg.yml b/other-cel/enforce-readwriteonce-pod/artifacthub-pkg.yml index 520a5ac06..91e0e8cec 100644 --- a/other-cel/enforce-readwriteonce-pod/artifacthub-pkg.yml +++ b/other-cel/enforce-readwriteonce-pod/artifacthub-pkg.yml @@ -29,6 +29,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "PersistentVolumeClaims" -digest: de7662c3394731c2de9205ebdda2da9da69e8022b616ca6e4ea9dbfd8ad2b2a8 +digest: c3595da6ec53e127aca4f08c38095764d652aa268ebfde21d3445545c75e1615 createdAt: "2024-03-31T10:53:27Z" diff --git a/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml b/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml index 5c5720e4d..ee0636ace 100644 --- a/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml +++ b/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - PersistentVolumeClaim + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/ensure-probes-different/artifacthub-pkg.yml b/other-cel/ensure-probes-different/artifacthub-pkg.yml index f90d1059f..c4fed20e3 100644 --- 
a/other-cel/ensure-probes-different/artifacthub-pkg.yml +++ b/other-cel/ensure-probes-different/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: cbafa29e49ec48f7378157f69fa77a53c07fb40dc4c542738a8f31331689f5be +digest: 95447cdc8a2287d3d0d9f300dd82bd62709d1bbe91c60ba2b11c8ce0a318bbcb createdAt: "2024-03-31T11:12:02Z" diff --git a/other-cel/ensure-probes-different/ensure-probes-different.yaml b/other-cel/ensure-probes-different/ensure-probes-different.yaml index 241ac246d..f49bb2104 100644 --- a/other-cel/ensure-probes-different/ensure-probes-different.yaml +++ b/other-cel/ensure-probes-different/ensure-probes-different.yaml @@ -27,6 +27,9 @@ spec: - Deployment - DaemonSet - StatefulSet + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/ensure-readonly-hostpath/artifacthub-pkg.yml b/other-cel/ensure-readonly-hostpath/artifacthub-pkg.yml index 1c09131ab..b94f2dc6b 100644 --- a/other-cel/ensure-readonly-hostpath/artifacthub-pkg.yml +++ b/other-cel/ensure-readonly-hostpath/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 5335b84399ed1bb06e70489940d2555cff0c97f7f937aac0fbdf8ee0a188ace1 +digest: 203506a9391a1d4bee3ed42209ab7e964606aee881db2cd93290bd075c98840b createdAt: "2024-04-05T17:39:16Z" diff --git a/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml b/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml index ea97b78be..acfd4db5f 100644 --- a/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml +++ b/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml @@ -27,6 +27,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/chainsaw-test.yaml 
b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..a388d214e --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,42 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: exclude-namespaces-dynamically +spec: + steps: + - name: step-01 + try: + - apply: + file: cm.yaml + - apply: + file: ns.yaml + - apply: + file: ../exclude-namespaces-dynamically.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: exclude-namespaces-example + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cm.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cm.yaml new file mode 100644 index 000000000..0c2e3c57a --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cm.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + exclude: "exclude-ns, exclude-ns-2" +kind: ConfigMap +metadata: + name: namespace-filters + namespace: default \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cmap.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cmap.yaml new file mode 100644 index 000000000..891cfb061 --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/cmap.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: namespace-filters + namespace: default +data: + exclude: "[\"default\", 
\"test\"]" + \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/ns.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/ns.yaml new file mode 100644 index 000000000..4c909ba8b --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/ns.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: exclude-ns +--- +apiVersion: v1 +kind: Namespace +metadata: + name: exclude-ns-2 \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-bad.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..78823c79e --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + namespace: default +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + labels: + bar: foo +spec: + containers: + - name: pod01 + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-good.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..9f2d32670 --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/pod-good.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 + namespace: exclude-ns +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 + namespace: exclude-ns-2 +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 + labels: + bar: foo + foo: bar +spec: + containers: + - name: pod01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: 
Pod +metadata: + name: goodpod04 + namespace: exclude-ns-2 + labels: + foo: bar +spec: + containers: + - name: pod01 + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-bad.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..931d85e11 --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: bb-01 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: bb-01 + image: kyverno + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-good.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..1360dedfa --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 + namespace: exclude-ns +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: bb-01 + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + foo: bar + spec: + containers: + - name: bb-01 + image: busybox:1.35 +--- 
+apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 + namespace: exclude-ns-2 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: bb-01 + image: kyverno + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: bb-01 + image: kyverno + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/exclude-namespaces-dynamically/.chainsaw-test/policy-ready.yaml b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..451f8163f --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: exclude-namespaces-example +status: + ready: true diff --git a/other-cel/exclude-namespaces-dynamically/artifacthub-pkg.yml b/other-cel/exclude-namespaces-dynamically/artifacthub-pkg.yml new file mode 100644 index 000000000..817299960 --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: exclude-namespaces-dynamically-cel +version: 1.0.0 +displayName: Exclude Namespaces Dynamically in CEL expressions +description: >- + It's common where policy lookups need to consider a mapping to many possible values rather than a static mapping. This is a sample which demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap where the ConfigMap stores an array of strings. This policy validates that any Pods created outside of the list of Namespaces have the label `foo` applied. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml + ``` +keywords: + - kyverno + - Sample + - CEL Expressions +readme: | + It's common where policy lookups need to consider a mapping to many possible values rather than a static mapping. This is a sample which demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap where the ConfigMap stores an array of strings. This policy validates that any Pods created outside of the list of Namespaces have the label `foo` applied. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace, Pod" +digest: 5ddbe0a585b27d938e5ae070444d0d8f346785f8566b28bcbfef1dc0d90cd3f4 +createdAt: "2024-04-24T18:58:33Z" + diff --git a/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml b/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml new file mode 100644 index 000000000..dc5a65852 --- /dev/null +++ b/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml @@ -0,0 +1,109 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: exclude-namespaces-example + annotations: + policies.kyverno.io/title: Exclude Namespaces Dynamically in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Namespace, Pod + policies.kyverno.io/minversion: 1.11.0 + pod-policies.kyverno.io/autogen-controllers: none + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + It's common where policy lookups need to consider a mapping to many possible values rather than a + static mapping. 
This is a sample which demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap + where the ConfigMap stores an array of strings. This policy validates that any Pods created + outside of the list of Namespaces have the label `foo` applied. +spec: + validationFailureAction: Audit + background: true + rules: + - name: exclude-namespaces-dynamically + match: + any: + - resources: + kinds: + - Deployment + - DaemonSet + - StatefulSet + - Job + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "filter-namespaces" + expression: "!(request.namespace in params.data['exclude'].split(', '))" + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: namespace-filters + namespace: default + parameterNotFoundAction: Deny + expressions: + - expression: "has(object.spec.template.metadata) && has(object.spec.template.metadata.labels) && 'foo' in object.spec.template.metadata.labels" + messageExpression: > + 'Creating Pods in the ' + request.namespace + ' namespace,' + + ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' + + ' is forbidden unless it carries the label `foo`.' + - name: exclude-namespaces-dynamically-pods + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "filter-namespaces" + expression: "!(request.namespace in params.data['exclude'].split(', '))" + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: namespace-filters + namespace: default + parameterNotFoundAction: Deny + expressions: + - expression: "has(object.metadata.labels) && 'foo' in object.metadata.labels" + messageExpression: > + 'Creating Pods in the ' + request.namespace + ' namespace,' + + ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' + + ' is forbidden unless it carries the label `foo`.'
+ - name: exclude-namespaces-dynamically-cronjobs + match: + any: + - resources: + kinds: + - CronJob + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "filter-namespaces" + expression: "!(request.namespace in params.data['exclude'].split(', '))" + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: namespace-filters + namespace: default + parameterNotFoundAction: Deny + expressions: + - expression: >- + has(object.spec.jobTemplate.spec.template.metadata) && + has(object.spec.jobTemplate.spec.template.metadata.labels) && 'foo' in object.spec.jobTemplate.spec.template.metadata.labels + messageExpression: > + 'Creating Pods in the ' + request.namespace + ' namespace,' + + ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' + + ' is forbidden unless it carries the label `foo`.' + diff --git a/other-cel/forbid-cpu-limits/artifacthub-pkg.yml b/other-cel/forbid-cpu-limits/artifacthub-pkg.yml index 3e86cb948..f116754ea 100644 --- a/other-cel/forbid-cpu-limits/artifacthub-pkg.yml +++ b/other-cel/forbid-cpu-limits/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 2865e5f92968f90e090aff597937ab7db3e3e5939c32cb84c84f881970dedae6 +digest: 8d8febae3e8acfab78c2ccf8b96d086f7086ea156b0a0ac36611db1c8958c357 createdAt: "2024-04-01T15:35:47Z" diff --git a/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml b/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml index b94b55756..75a791c85 100644 --- a/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml +++ b/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml @@ -22,6 +22,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/imagepullpolicy-always/artifacthub-pkg.yml b/other-cel/imagepullpolicy-always/artifacthub-pkg.yml index 53537fb8b..cf2c42e25 100644 --- 
a/other-cel/imagepullpolicy-always/artifacthub-pkg.yml +++ b/other-cel/imagepullpolicy-always/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: a6708df7cd59fcd4dc4f764ff01541940f39eca5d4ddffd9529d83090e511b47 +digest: 48bf801d9acfef85768bf1f9fb3820a6cee3b9f87acb7a4f07f2449d193934cb createdAt: "2024-04-03T17:41:38Z" diff --git a/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml b/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml index 951d07495..e80b97cc3 100644 --- a/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml +++ b/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/ingress-host-match-tls/artifacthub-pkg.yml b/other-cel/ingress-host-match-tls/artifacthub-pkg.yml index 344cc88d1..541c86246 100644 --- a/other-cel/ingress-host-match-tls/artifacthub-pkg.yml +++ b/other-cel/ingress-host-match-tls/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Ingress" -digest: 5442acaa90c6a45509015995028e241374b76d60cc700fbf6dd9f61178ba432f +digest: 4de00322c258919b797101a18afa3fa262ce78b52132f2fd903cfea8f60d1f1e createdAt: "2024-04-06T17:22:38Z" diff --git a/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml b/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml index 27bb57185..6b2f7f551 100644 --- a/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml +++ b/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/limit-containers-per-pod/artifacthub-pkg.yml b/other-cel/limit-containers-per-pod/artifacthub-pkg.yml index 92aa34409..85e7e2dc5 
100644 --- a/other-cel/limit-containers-per-pod/artifacthub-pkg.yml +++ b/other-cel/limit-containers-per-pod/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 7916672ede794217fb00144785594818cbb66f409c1d2f0d513cfeb944e92ed1 +digest: 6a915cbe21250809e2e9665f9b79dde5f9b1fc77f2538c5f25ec9c5dda86a00b createdAt: "2024-04-01T15:48:55Z" diff --git a/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml b/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml index 022377acc..7c14dc3e6 100644 --- a/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml +++ b/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/limit-hostpath-type-pv/artifacthub-pkg.yml b/other-cel/limit-hostpath-type-pv/artifacthub-pkg.yml index 5d0ee14d9..fc2c77549 100644 --- a/other-cel/limit-hostpath-type-pv/artifacthub-pkg.yml +++ b/other-cel/limit-hostpath-type-pv/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "PersistentVolume" -digest: 981a66b5f77de02d3f6623b49c02421dd1adf4e9882d96a2e0219de9dba52672 +digest: 8f2f85798607f78ce3eb794c08df351a8c171629c64481d5d7575c33b8428333 createdAt: "2024-04-04T17:35:35Z" diff --git a/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml b/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml index 6004e0e42..b20bf2ba4 100644 --- a/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml +++ b/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - PersistentVolume + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/chainsaw-test.yaml 
b/other-cel/limit-hostpath-vols/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..30ace9483 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: limit-hostpath-vols +spec: + steps: + - name: step-01 + try: + - apply: + file: ../limit-hostpath-vols.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: limit-hostpath-vols + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/pod-bad.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..2dfc11c6c --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /etc/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /home/junk \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/pod-good.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-good.yaml new file mode 100644 index 
000000000..92ed9c3a4 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/pod-good.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /data/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /data/junk + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-bad.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..0379980ec --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /etc/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + 
jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: foo + hostPath: + path: /data + - name: bar + hostPath: + path: /etc/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-good.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..acb202523 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /data/junk + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + volumes: + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi + - name: foo + hostPath: + path: /data/junk + - name: config-vol + configMap: + name: foo + items: + - key: foo + path: bar + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.chainsaw-test/policy-ready.yaml b/other-cel/limit-hostpath-vols/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..b5a967f65 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-hostpath-vols +status: + ready: true diff --git 
a/other-cel/limit-hostpath-vols/.kyverno-test/badpod.yaml b/other-cel/limit-hostpath-vols/.kyverno-test/badpod.yaml new file mode 100644 index 000000000..52a2b193f --- /dev/null +++ b/other-cel/limit-hostpath-vols/.kyverno-test/badpod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: bad-pods-all +spec: + initContainers: + - name: inittest-webserver + image: asdfeasdfasada:latest + # volumeMounts: + # - mountPath: /some/dir + # name: bar + # readOnly: true + containers: + - name: test-webserver + image: asdfeasdfasada:latest + volumeMounts: + - mountPath: /some/dir + name: foo + readOnly: true + - name: test-webserver02 + image: sjbonmqopcta:latest + # volumeMounts: + # - mountPath: /some/dir + # name: bar + # readOnly: true + volumes: + - name: foo + hostPath: + path: /etc + # - name: bar + # hostPath: + # path: /data/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.kyverno-test/goodpod.yaml b/other-cel/limit-hostpath-vols/.kyverno-test/goodpod.yaml new file mode 100644 index 000000000..efa0d501a --- /dev/null +++ b/other-cel/limit-hostpath-vols/.kyverno-test/goodpod.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: good-pods-all +spec: + initContainers: + - name: inittest-webserver + image: asdfeasdfasada:latest + # volumeMounts: + # - mountPath: /some/dir + # name: bar + # readOnly: true + containers: + - name: test-webserver + image: asdfeasdfasada:latest + volumeMounts: + - mountPath: /some/dir + name: foo + readOnly: true + - name: test-webserver02 + image: sjbonmqopcta:latest + # volumeMounts: + # - mountPath: /some/dir + # name: bar + # readOnly: true + volumes: + - name: foo + hostPath: + path: /data + # - name: bar + # hostPath: + # path: /data/junk + - name: empty + emptyDir: + medium: memory + sizeLimit: 20Mi \ No newline at end of file diff --git a/other-cel/limit-hostpath-vols/.kyverno-test/kyverno-test.yaml 
b/other-cel/limit-hostpath-vols/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..904dc13b1 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: limit-hostpath-vols +policies: +- ../limit-hostpath-vols.yaml +resources: +- goodpod.yaml +- badpod.yaml +results: +- kind: Pod + policy: limit-hostpath-vols + resources: + - bad-pods-all + result: fail + rule: limit-hostpath-to-slash-data +- kind: Pod + policy: limit-hostpath-vols + resources: + - good-pods-all + result: pass + rule: limit-hostpath-to-slash-data +variables: values.yaml diff --git a/other-cel/limit-hostpath-vols/.kyverno-test/values.yaml b/other-cel/limit-hostpath-vols/.kyverno-test/values.yaml new file mode 100644 index 000000000..f0bdd4ef1 --- /dev/null +++ b/other-cel/limit-hostpath-vols/.kyverno-test/values.yaml @@ -0,0 +1,8 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Values +policies: +- name: limit-hostpath-vols + resources: + - name: bad-pods-all + values: + request.operation: UPDATE diff --git a/other-cel/limit-hostpath-vols/artifacthub-pkg.yml b/other-cel/limit-hostpath-vols/artifacthub-pkg.yml new file mode 100644 index 000000000..4386e87d4 --- /dev/null +++ b/other-cel/limit-hostpath-vols/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: limit-hostpath-vols-cel +version: 1.0.0 +displayName: Limit hostPath Volumes to Specific Directories in CEL expressions +description: >- + hostPath volumes consume the underlying node's file system. If hostPath volumes are not to be universally disabled, they should be restricted to only certain host paths so as not to allow access to sensitive information. This policy ensures the only directory that can be mounted as a hostPath volume is /data. It is strongly recommended to pair this policy with a second to ensure readOnly access is enforced preventing directory escape. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + hostPath volumes consume the underlying node's file system. If hostPath volumes are not to be universally disabled, they should be restricted to only certain host paths so as not to allow access to sensitive information. This policy ensures the only directory that can be mounted as a hostPath volume is /data. It is strongly recommended to pair this policy with a second to ensure readOnly access is enforced preventing directory escape. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 8ae23309c8e49ca3e3abe72f406e9ae186564ab24468ea4e772b6f3097793892 +createdAt: "2024-04-26T15:52:10Z" + diff --git a/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml b/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml new file mode 100644 index 000000000..65d038c77 --- /dev/null +++ b/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml @@ -0,0 +1,41 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-hostpath-vols + annotations: + policies.kyverno.io/title: Limit hostPath Volumes to Specific Directories in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + hostPath volumes consume the underlying node's file system. If hostPath volumes + are not to be universally disabled, they should be restricted to only certain + host paths so as not to allow access to sensitive information. 
This policy ensures + the only directory that can be mounted as a hostPath volume is /data. It is strongly + recommended to pair this policy with a second to ensure readOnly + access is enforced preventing directory escape. +spec: + background: false + validationFailureAction: Audit + rules: + - name: limit-hostpath-to-slash-data + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "has-host-path-volume" + expression: "has(object.spec.volumes) && object.spec.volumes.exists(volume, has(volume.hostPath))" + validate: + cel: + expressions: + - expression: "object.spec.volumes.all(volume, !has(volume.hostPath) || volume.hostPath.path.split('/')[1] == 'data')" + message: hostPath volumes are confined to /data. + diff --git a/other-cel/memory-requests-equal-limits/artifacthub-pkg.yml b/other-cel/memory-requests-equal-limits/artifacthub-pkg.yml index c50a6c04d..8fd967286 100644 --- a/other-cel/memory-requests-equal-limits/artifacthub-pkg.yml +++ b/other-cel/memory-requests-equal-limits/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 176dc9b492d3eee687bc89711d3414f13bf00548b85781e71ccaacd12bbf6f1a +digest: fc71819c4079262810e06ee768738b1061f46985df61ab688007bfdb7433be70 createdAt: "2024-04-07T11:13:21Z" diff --git a/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml b/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml index 82b23257b..bc78c62b1 100644 --- a/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml +++ b/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/metadata-match-regex/artifacthub-pkg.yml b/other-cel/metadata-match-regex/artifacthub-pkg.yml index c4c2212d4..65d669de7 100644 
--- a/other-cel/metadata-match-regex/artifacthub-pkg.yml +++ b/other-cel/metadata-match-regex/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod, Label" -digest: 4f6e2a07df41b3ce83af7ce25a6cdb7bae14f336edfd178bb52b25183f6c580d +digest: 2957a6e03dec3eab58436ddb3478aca69d41dbe7953c6ecb1ece1a54338856e2 createdAt: "2024-04-07T10:16:14Z" diff --git a/other-cel/metadata-match-regex/metadata-match-regex.yaml b/other-cel/metadata-match-regex/metadata-match-regex.yaml index be0c59c02..44676c893 100644 --- a/other-cel/metadata-match-regex/metadata-match-regex.yaml +++ b/other-cel/metadata-match-regex/metadata-match-regex.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/pdb-maxunavailable/artifacthub-pkg.yml b/other-cel/pdb-maxunavailable/artifacthub-pkg.yml index efc26a695..9fe6c2180 100644 --- a/other-cel/pdb-maxunavailable/artifacthub-pkg.yml +++ b/other-cel/pdb-maxunavailable/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "PodDisruptionBudget" -digest: 7dff4f3801bce1ca8835c5ebcadaa78e1fa41480a19958eb78aee5bbfcd6b8bf +digest: 4b452f78ab0ff9715f1454fd3ca827b7aa7a892fa2b2f23aa5c21a12851c526d createdAt: "2024-04-07T10:22:03Z" diff --git a/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml b/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml index 4c2da59ae..5b036805c 100644 --- a/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml +++ b/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - PodDisruptionBudget + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/prevent-bare-pods/artifacthub-pkg.yml b/other-cel/prevent-bare-pods/artifacthub-pkg.yml index 3917c8200..5198560a5 100644 --- 
a/other-cel/prevent-bare-pods/artifacthub-pkg.yml +++ b/other-cel/prevent-bare-pods/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Other, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 325e1a068bd771c60a304f121675b9d895bcc8abacc7b48054ae4465d51fd158 +digest: ff8f3288a8e8ea57d91d27785866d0c17b8112b8697d0689e9f324874deb1f3b createdAt: "2024-04-07T10:47:32Z" diff --git a/other-cel/prevent-bare-pods/prevent-bare-pods.yaml b/other-cel/prevent-bare-pods/prevent-bare-pods.yaml index 2afe850c7..a04ad48f9 100644 --- a/other-cel/prevent-bare-pods/prevent-bare-pods.yaml +++ b/other-cel/prevent-bare-pods/prevent-bare-pods.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/prevent-cr8escape/artifacthub-pkg.yml b/other-cel/prevent-cr8escape/artifacthub-pkg.yml index afc130c3e..17703c6d2 100644 --- a/other-cel/prevent-cr8escape/artifacthub-pkg.yml +++ b/other-cel/prevent-cr8escape/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 84a0f441ce5baec6060606a05f2f7f54847e79b48a38c9edc1655e6f0caf8bbf +digest: 7a684c2d2747e4ef77b44de44734c74143325757077e8047f3c89d535c5b9dfd createdAt: "2024-04-08T10:46:02Z" diff --git a/other-cel/prevent-cr8escape/prevent-cr8escape.yaml b/other-cel/prevent-cr8escape/prevent-cr8escape.yaml index dfa8e918e..f81fc062c 100644 --- a/other-cel/prevent-cr8escape/prevent-cr8escape.yaml +++ b/other-cel/prevent-cr8escape/prevent-cr8escape.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/require-annotations/artifacthub-pkg.yml b/other-cel/require-annotations/artifacthub-pkg.yml index eee0e31f1..83da222c3 100644 --- a/other-cel/require-annotations/artifacthub-pkg.yml +++ 
b/other-cel/require-annotations/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod, Annotation" -digest: dc8408d4a7a929f2f142b174a2ea06148f4dbd65ab16d70870a2213919dadf9d +digest: affed5b798321bdaac4b178887ad1a98c4fd00e9e756849dac3f0d70148f6ef1 createdAt: "2024-04-09T15:56:35Z" diff --git a/other-cel/require-annotations/require-annotations.yaml b/other-cel/require-annotations/require-annotations.yaml index 1ff29c235..1f01534b4 100644 --- a/other-cel/require-annotations/require-annotations.yaml +++ b/other-cel/require-annotations/require-annotations.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/require-container-port-names/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-container-port-names/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..57c31e79a --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-container-port-names +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-container-port-names.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-container-port-names + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/require-container-port-names/.chainsaw-test/pod-bad.yaml 
b/other-cel/require-container-port-names/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..f55f602a1 --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + - containerPort: 443 + name: https-port +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + name: http-port + - containerPort: 443 + name: https-port \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/pod-good.yaml b/other-cel/require-container-port-names/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..0825ba43a --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/pod-good.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - containerPort: 443 + name: https-port \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..dd9ac3feb --- /dev/null 
+++ b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + - containerPort: 443 + name: https-port +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - containerPort: 80 + - containerPort: 443 + name: https-port + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/podcontroller-good.yaml b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..f491d3aab --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - containerPort: 443 + name: https-port +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + spec: + containers: + - name: busybox + image: 
busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - name: busybox02 + image: busybox:1.35 + ports: + - name: http-port + containerPort: 80 + - containerPort: 443 + name: https-port + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-container-port-names/.chainsaw-test/policy-ready.yaml b/other-cel/require-container-port-names/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..f8c44c430 --- /dev/null +++ b/other-cel/require-container-port-names/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-container-port-names +status: + ready: true diff --git a/other-cel/require-container-port-names/.kyverno-test/kyverno-test.yaml b/other-cel/require-container-port-names/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c1054a710 --- /dev/null +++ b/other-cel/require-container-port-names/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,52 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-container-port-names +policies: +- ../require-container-port-names.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - goodpod01 + - goodpod02 + result: pass +- kind: Deployment + policy: require-container-port-names + rule: port-name + resources: + - baddeployment01 + result: fail +- kind: CronJob + policy: require-container-port-names + rule: port-name + resources: + - badcronjob01 + result: fail +- kind: Deployment + policy: require-container-port-names + rule: port-name + resources: + - gooddeployment01 + result: pass +- kind: CronJob + policy: 
require-container-port-names + rule: port-name + resources: + - goodcronjob01 + result: pass + diff --git a/other-cel/require-container-port-names/artifacthub-pkg.yml b/other-cel/require-container-port-names/artifacthub-pkg.yml new file mode 100644 index 000000000..1583eaf92 --- /dev/null +++ b/other-cel/require-container-port-names/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: require-container-port-names-cel +version: 1.0.0 +displayName: Require Container Port Names in CEL expressions +description: >- + Containers may define ports on which they listen. In addition to a port number, a name field may optionally be used. Including a name makes it easier when defining Service resource definitions and others since the name may be referenced allowing the port number to change. This policy requires that for every containerPort defined there is also a name specified. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-container-port-names/require-container-port-names.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Containers may define ports on which they listen. In addition to a port number, a name field may optionally be used. Including a name makes it easier when defining Service resource definitions and others since the name may be referenced allowing the port number to change. This policy requires that for every containerPort defined there is also a name specified. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 62488bb402289ddbffe291c61acb14a50347e476e99d4f79ba035b4d3297403e +createdAt: "2024-04-27T16:37:39Z" + diff --git a/other-cel/require-container-port-names/require-container-port-names.yaml b/other-cel/require-container-port-names/require-container-port-names.yaml new file mode 100644 index 000000000..b2756b98e --- /dev/null +++ b/other-cel/require-container-port-names/require-container-port-names.yaml @@ -0,0 +1,36 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-container-port-names + annotations: + policies.kyverno.io/title: Require Container Port Names in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Containers may define ports on which they listen. In addition to a port number, + a name field may optionally be used. Including a name makes it easier when defining + Service resource definitions and others since the name may be referenced allowing + the port number to change. This policy requires that for every containerPort defined + there is also a name specified. +spec: + validationFailureAction: Audit + background: true + rules: + - name: port-name + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.containers.all(container, !has(container.ports) || container.ports.all(port, has(port.name)))" + message: Name is required for every containerPort. 
+ diff --git a/other-cel/require-deployments-have-multiple-replicas/artifacthub-pkg.yml b/other-cel/require-deployments-have-multiple-replicas/artifacthub-pkg.yml index bb5f2ecbb..ddbdfb2fb 100644 --- a/other-cel/require-deployments-have-multiple-replicas/artifacthub-pkg.yml +++ b/other-cel/require-deployments-have-multiple-replicas/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Deployment" -digest: 5c1e1b6bdb837cfba211615438cd50fa38b78c559ae43f4a791f5558f873b5d3 +digest: ee5b95668db9936b32f32f1d8ee167d4adec5c71c214981bbb503c1a5c416356 createdAt: "2024-04-09T16:03:47Z" diff --git a/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.yaml b/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.yaml index 5eaf97659..567e1c5c5 100644 --- a/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.yaml +++ b/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Deployment + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/bad-pod.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/bad-pod.yaml new file mode 100644 index 000000000..da2b7cd1a --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/bad-pod.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: busybox + name: badpod01 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + 
name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo-host + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..0f3822143 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-emptydir-requests-limits +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-emptydir-requests-limits.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-emptydir-requests-and-limits + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-bad.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..b515750a6 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,159 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - image: 
busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumes: + - name: foo + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/vol + name: vol + - image: busybox:1.35 + name: busybox02 + volumeMounts: + - mountPath: /mnt/foo + name: foo + volumes: + - name: vol + emptyDir: + sizeLimit: 200Mi + - name: foo + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/vol + name: vol + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/foo + name: foo + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + requests: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + requests: + ephemeral-storage: 
"2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod07 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + limits: + memory: "2Gi" + requests: + memory: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-good.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..2af525da3 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/pod-good.yaml @@ -0,0 +1,207 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - image: busybox:1.35 + name: busybox + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - image: busybox:1.35 + name: busybox +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + 
volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02 + volumes: + - name: foo + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod07 +spec: + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + volumes: + - name: foo + hostPath: + path: /var/foo +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod08 +spec: + initContainers: + - name: certificates + image: busybox + volumeMounts: + - name: etc-ssl-certs + mountPath: /etc/ssl/certs + resources: + limits: + ephemeral-storage: 256Mi + requests: + ephemeral-storage: 256Mi + - name: configure + image: busybox + volumeMounts: + - name: etc-ssl-certs + mountPath: /etc/ssl/certs/ + - name: my-app-secrets + mountPath: /init-secrets + resources: + limits: + ephemeral-storage: 256Mi + requests: + ephemeral-storage: 256Mi + containers: + - name: my-app + image: busybox + resources: + limits: + cpu: "2" + ephemeral-storage: 1Gi + memory: 500Mi + requests: + ephemeral-storage: 500Mi + volumeMounts: + - name: etc-ssl-certs + mountPath: /etc/ssl/certs/ + - name: my-app-secrets 
+ mountPath: /etc/secrets + volumes: + - name: my-app-secrets + emptyDir: + medium: Memory + - name: etc-ssl-certs + emptyDir: + medium: "Memory" \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..d32c2c22d --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,104 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo-host + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/foo + name: foo + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo-host + - image: busybox:1.35 + name: busybox02 + 
resources: + requests: + memory: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-good.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..70b656486 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,111 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + volumeMounts: + - mountPath: /mnt/foo + name: foo-host + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + initContainers: + - image: busybox:1.35 + name: busybox-init + volumeMounts: + - mountPath: /mnt/vol + name: vol + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + - image: busybox:1.35 + name: busybox02-init + 
volumeMounts: + - mountPath: /mnt/foo + name: foo + containers: + - image: busybox:1.35 + name: busybox + volumeMounts: + - mountPath: /mnt/foo + name: foo + - image: busybox:1.35 + name: busybox02 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /mnt/vol + name: vol + volumes: + - name: vol + emptyDir: {} + - name: foo + emptyDir: + sizeLimit: 200Mi + - name: foo-host + hostPath: + path: /var/foo + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/require-emptydir-requests-limits/.chainsaw-test/policy-ready.yaml b/other-cel/require-emptydir-requests-limits/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..9aa39646d --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-emptydir-requests-and-limits +status: + ready: true diff --git a/other-cel/require-emptydir-requests-limits/.kyverno-test/kyverno-test.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b97a5b0ec --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,30 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-emptydir-requests-and-limits +policies: +- ../require-emptydir-requests-limits.yaml +resources: +- resource-fail.yaml +- resource-pass.yaml +- resource-skip.yaml +results: +- kind: Pod + policy: require-emptydir-requests-and-limits + resources: + - fail-pod + result: fail + rule: check-emptydir-requests-limits +- kind: Pod + policy: require-emptydir-requests-and-limits + resources: + - pass-pod-01 + - pass-pod-02 + result: pass + rule: check-emptydir-requests-limits +- kind: Pod + policy: require-emptydir-requests-and-limits + resources: + - skip-pod + result: skip + rule: check-emptydir-requests-limits diff 
--git a/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-fail.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-fail.yaml new file mode 100644 index 000000000..29b724bbc --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-fail.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: fail-pod +spec: + containers: + - image: gcr.io/hello-world:1.0 + name: test + resources: {} + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} diff --git a/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-pass.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-pass.yaml new file mode 100644 index 000000000..b614d4688 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-pass.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pass-pod-01 +spec: + containers: + - image: gcr.io/hello-world:1.0 + name: test + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "2Gi" + volumeMounts: + - mountPath: /cache/data + name: vol + volumes: + - name: vol + emptyDir: {} +--- +apiVersion: v1 +kind: Pod +metadata: + name: pass-pod-02 +spec: + containers: + - image: gcr.io/hello-world:1.0 + name: test + volumeMounts: + - mountPath: /cache/data + name: vol + - mountPath: /cache/data2 + name: vo2 + volumes: + - name: vol + emptyDir: + sizeLimit: 1Gi + diff --git a/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-skip.yaml b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-skip.yaml new file mode 100644 index 000000000..8c2736f3f --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/.kyverno-test/resource-skip.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: skip-pod +spec: + containers: + - image: gcr.io/hello-world:1.0 + name: test + volumeMounts: + - mountPath: /cache/data + name: vol + - mountPath: 
/cache/data2 + name: vol2 + volumes: + - name: vol + hostPath: + path: /mnt/data + - name: vol2 + hostPath: + path: /mnt/data2 diff --git a/other-cel/require-emptydir-requests-limits/artifacthub-pkg.yml b/other-cel/require-emptydir-requests-limits/artifacthub-pkg.yml new file mode 100644 index 000000000..859a6e8b3 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: require-emptydir-requests-limits-cel +version: 1.0.0 +displayName: Require Requests and Limits for emptyDir in CEL expressions +description: >- + Pods which mount emptyDir volumes may be allowed to potentially overrun the medium backing the emptyDir volume. This sample ensures that any initContainers or containers mounting an emptyDir volume have ephemeral-storage requests and limits set. Policy will be skipped if the volume has already a sizeLimit set. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Pods which mount emptyDir volumes may be allowed to potentially overrun the medium backing the emptyDir volume. This sample ensures that any initContainers or containers mounting an emptyDir volume have ephemeral-storage requests and limits set. Policy will be skipped if the volume has already a sizeLimit set. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 8915013155bfb12e6414848c9dec66a9e95ab7318f5da7d0c64bc621143e5383 +createdAt: "2024-05-19T10:11:00Z" diff --git a/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml b/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml new file mode 100644 index 000000000..bc3cc0b67 --- /dev/null +++ b/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml @@ -0,0 +1,51 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-emptydir-requests-and-limits + annotations: + policies.kyverno.io/title: Require Requests and Limits for emptyDir in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Pods which mount emptyDir volumes may be allowed to potentially overrun + the medium backing the emptyDir volume. This sample ensures that any + initContainers or containers mounting an emptyDir volume have + ephemeral-storage requests and limits set. Policy will be skipped if + the volume has already a sizeLimit set. +spec: + background: false + validationFailureAction: Audit + rules: + - name: check-emptydir-requests-limits + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "has-emptydir-volume" + expression: "has(object.spec.volumes) && object.spec.volumes.exists(volume, has(volume.emptyDir))" + validate: + cel: + variables: + - name: containers + expression: "object.spec.containers + (has(object.spec.initContainers) ? 
object.spec.initContainers : [])" + - name: emptydirnames + expression: >- + has(object.spec.volumes) ? + object.spec.volumes.filter(volume, has(volume.emptyDir) && !has(volume.emptyDir.sizeLimit)).map(volume, volume.name) : [] + expressions: + - expression: >- + variables.containers.all(container, + !has(container.volumeMounts) || + !container.volumeMounts.exists(mount, mount.name in variables.emptydirnames) || + container.resources.?requests[?'ephemeral-storage'].hasValue() && + container.resources.?limits[?'ephemeral-storage'].hasValue()) + message: Containers mounting emptyDir volumes must specify requests and limits for ephemeral-storage. + diff --git a/other-cel/require-image-checksum/artifacthub-pkg.yml b/other-cel/require-image-checksum/artifacthub-pkg.yml index 0ebbe4a86..d99ae75b5 100644 --- a/other-cel/require-image-checksum/artifacthub-pkg.yml +++ b/other-cel/require-image-checksum/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: e9c64e577e3f4d255489ce34191ee29c4180fae774a60eceaff1d12c1e716891 +digest: 6a775c3ab5b2c24f6fbe10de35ecca20e967d3d70242403718b55f5a04c07c08 createdAt: "2024-04-10T18:21:59Z" diff --git a/other-cel/require-image-checksum/require-image-checksum.yaml b/other-cel/require-image-checksum/require-image-checksum.yaml index 43f87b64c..29c181758 100644 --- a/other-cel/require-image-checksum/require-image-checksum.yaml +++ b/other-cel/require-image-checksum/require-image-checksum.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/require-ingress-https/artifacthub-pkg.yml b/other-cel/require-ingress-https/artifacthub-pkg.yml index 711e1b678..6712dedf5 100644 --- a/other-cel/require-ingress-https/artifacthub-pkg.yml +++ b/other-cel/require-ingress-https/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" 
kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Ingress" -digest: fb50d3603fbd348e84ce2e64c06e313c9c028daa640893f3c95a9e28c27687c0 +digest: 56fca07a343423b529d0cd1e27069ca705b60cc1f590c832b2467c757b1b6957 createdAt: "2024-04-10T18:31:27Z" diff --git a/other-cel/require-ingress-https/require-ingress-https.yaml b/other-cel/require-ingress-https/require-ingress-https.yaml index e2d8d866c..36268e98d 100644 --- a/other-cel/require-ingress-https/require-ingress-https.yaml +++ b/other-cel/require-ingress-https/require-ingress-https.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: expressions: @@ -38,6 +41,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/require-non-root-groups/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-non-root-groups/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..776c5d107 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,40 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-non-root-groups +spec: + # disable templating because it can cause issues with CEL expressions + template: false + steps: + - name: step-01 + try: + - apply: + file: ../require-non-root-groups.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-non-root-groups + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git 
a/other-cel/require-non-root-groups/.chainsaw-test/pod-bad.yaml b/other-cel/require-non-root-groups/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..af72489a1 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,246 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod07 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod08 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod09 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 
1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod10 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod11 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod12 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod13 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod14 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod15 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [0] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + 
securityContext: + supplementalGroups: [14,0] +--- +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-badpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + fsGroup: 0 +--- diff --git a/other-cel/require-non-root-groups/.chainsaw-test/pod-good.yaml b/other-cel/require-non-root-groups/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..712cd7520 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/pod-good.yaml @@ -0,0 +1,182 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod07 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + 
name: goodpod08 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod09 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod10 +spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod03 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32,94] +--- +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-goodpod02 +spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + fsGroup: 32 \ No newline at end of file diff --git a/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..5f56fe9c0 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,761 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - 
name: container01 + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment08 +spec: + replicas: 1 + selector: + 
matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment11 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment12 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment13 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: 
+ labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment14 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment15 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + 
restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: 
initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob11 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob12 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob13 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob14 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob15 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + - name: 
initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [0] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [14,0] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [0] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + supplementalGroups: [14,0] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + fsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + fsGroup: 0 \ No newline at end of file diff --git a/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-good.yaml 
b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..221eae1a4 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,561 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + 
spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment08 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +#### CRONJOBS +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + 
spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + - name: container02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + 
- name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32,94] +--- +apiVersion: batch/v1 +kind: 
CronJob +metadata: + name: supgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + supplementalGroups: [32,94] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + fsGroup: 32 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: busybox:1.35 + securityContext: + runAsGroup: 1 + fsGroup: 32 diff --git a/other-cel/require-non-root-groups/.chainsaw-test/policy-ready.yaml b/other-cel/require-non-root-groups/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..b51263787 --- /dev/null +++ b/other-cel/require-non-root-groups/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-non-root-groups +status: + ready: true diff --git a/other-cel/require-non-root-groups/.kyverno-test/kyverno-test.yaml b/other-cel/require-non-root-groups/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b477f9589 --- /dev/null +++ b/other-cel/require-non-root-groups/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,198 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-non-root-groups +policies: +- 
../require-non-root-groups.yaml +resources: +- resource.yaml +results: +- kind: CronJob + policy: require-non-root-groups + resources: + - fsgrp-badcronjob01 + result: fail + rule: check-fsgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - fsgrp-baddeployment01 + result: fail + rule: check-fsgroup +- kind: Pod + policy: require-non-root-groups + resources: + - fsgrp-badpod01 + result: fail + rule: check-fsgroup +- kind: CronJob + policy: require-non-root-groups + resources: + - fsgrp-goodcronjob01 + - fsgrp-goodcronjob02 + result: pass + rule: check-fsgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - fsgrp-gooddeployment01 + - fsgrp-gooddeployment02 + result: pass + rule: check-fsgroup +- kind: Pod + policy: require-non-root-groups + resources: + - fsgrp-goodpod01 + - fsgrp-goodpod02 + result: pass + rule: check-fsgroup +- kind: CronJob + policy: require-non-root-groups + resources: + - badcronjob01 + - badcronjob02 + - badcronjob03 + - badcronjob04 + - badcronjob05 + - badcronjob06 + - badcronjob07 + - badcronjob08 + - badcronjob09 + - badcronjob10 + - badcronjob11 + - badcronjob12 + - badcronjob13 + - badcronjob14 + - badcronjob15 + result: fail + rule: check-runasgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - baddeployment01 + - baddeployment02 + - baddeployment03 + - baddeployment04 + - baddeployment05 + - baddeployment06 + - baddeployment07 + - baddeployment08 + - baddeployment09 + - baddeployment10 + - baddeployment11 + - baddeployment12 + - baddeployment13 + - baddeployment14 + - baddeployment15 + result: fail + rule: check-runasgroup +- kind: Pod + policy: require-non-root-groups + resources: + - badpod01 + - badpod02 + - badpod03 + - badpod04 + - badpod05 + - badpod06 + - badpod07 + - badpod08 + - badpod09 + - badpod10 + - badpod11 + - badpod12 + - badpod13 + - badpod14 + - badpod15 + result: fail + rule: check-runasgroup +- kind: CronJob + policy: require-non-root-groups + 
resources: + - goodcronjob01 + - goodcronjob02 + - goodcronjob03 + - goodcronjob04 + - goodcronjob05 + - goodcronjob06 + - goodcronjob07 + - goodcronjob08 + - goodcronjob09 + - goodcronjob10 + result: pass + rule: check-runasgroup +- kind: Deployment + policy: require-non-root-groups + resources: + - gooddeployment01 + - gooddeployment02 + - gooddeployment03 + - gooddeployment04 + - gooddeployment05 + - gooddeployment06 + - gooddeployment07 + - gooddeployment08 + - gooddeployment09 + - gooddeployment10 + result: pass + rule: check-runasgroup +- kind: Pod + policy: require-non-root-groups + resources: + - goodpod01 + - goodpod02 + - goodpod03 + - goodpod04 + - goodpod05 + - goodpod06 + - goodpod07 + - goodpod08 + - goodpod09 + - goodpod10 + result: pass + rule: check-runasgroup +- kind: CronJob + policy: require-non-root-groups + resources: + - supgrp-badcronjob01 + - supgrp-badcronjob02 + result: fail + rule: check-supplementalgroups +- kind: Deployment + policy: require-non-root-groups + resources: + - supgrp-baddeployment01 + - supgrp-baddeployment02 + result: fail + rule: check-supplementalgroups +- kind: Pod + policy: require-non-root-groups + resources: + - supgrp-badpod01 + - supgrp-badpod02 + result: fail + rule: check-supplementalgroups +- kind: CronJob + policy: require-non-root-groups + resources: + - supgrp-goodcronjob01 + - supgrp-goodcronjob02 + - supgrp-goodcronjob03 + result: pass + rule: check-supplementalgroups +- kind: Deployment + policy: require-non-root-groups + resources: + - supgrp-gooddeployment01 + - supgrp-gooddeployment02 + - supgrp-gooddeployment03 + result: pass + rule: check-supplementalgroups +- kind: Pod + policy: require-non-root-groups + resources: + - supgrp-goodpod01 + - supgrp-goodpod02 + - supgrp-goodpod03 + result: pass + rule: check-supplementalgroups diff --git a/other-cel/require-non-root-groups/.kyverno-test/resource.yaml b/other-cel/require-non-root-groups/.kyverno-test/resource.yaml new file mode 100644 index 
000000000..97269bf53 --- /dev/null +++ b/other-cel/require-non-root-groups/.kyverno-test/resource.yaml @@ -0,0 +1,1854 @@ +############################ +## Rule: check-runasgroup ## +############################ +###### Pods - Bad +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod06 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod07 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod08 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod09 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- 
+apiVersion: v1 +kind: Pod +metadata: + name: badpod10 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod11 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod12 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod13 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod14 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod15 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +###### Pods - Good +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: container01 + image: 
dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod07 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod08 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod09 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod10 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: 
dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +###### Deployments - Bad +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: 
+ name: baddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment08 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment11 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment12 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + 
app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment13 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment14 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment15 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +###### Deployments - Good +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + 
spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment07 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment08 +spec: + replicas: 1 + selector: + matchLabels: + 
app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment09 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment10 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +###### CronJobs - Bad +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: 
batch/v1 +kind: CronJob +metadata: + name: badcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 0 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob 
+metadata: + name: badcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob11 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 0 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob12 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob13 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob14 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 0 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: 
badcronjob15 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 0 +--- +###### CronJobs - Good +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + runAsGroup: 1 + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob06 +spec: + schedule: "*/1 * * * *" + 
jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob07 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob08 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob09 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob10 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + runAsGroup: 1 + - name: initcontainer02 + image: dummyimagename + securityContext: + runAsGroup: 1 + containers: + - name: container01 + image: dummyimagename + securityContext: + runAsGroup: 1 +#################################### +## Rule: check-supplementalgroups ## +#################################### +###### Pods - 
Bad +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [0] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-badpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [14,0] +--- +###### Pods - Good +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32] +--- +apiVersion: v1 +kind: Pod +metadata: + name: supgrp-goodpod03 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32,94] +--- +###### Deployments - Bad +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [0] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [14,0] +--- +###### Deployments - Good +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + 
spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: supgrp-gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32,94] +--- +###### CronJobs - Bad +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [0] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [14,0] +--- +###### CronJobs - Good +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: supgrp-goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + supplementalGroups: [32,94] +--- +######################### +## Rule: check-fsgroup ## +######################### +###### Pods - Bad +apiVersion: v1 
+kind: Pod +metadata: + name: fsgrp-badpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 0 +--- +###### Pods - Good +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: fsgrp-goodpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 32 +--- +###### Deployments - Bad +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 0 +--- +###### Deployments - Good +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fsgrp-gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 32 +--- +###### CronJobs - Bad +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 0 +--- +###### CronJobs - Good +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fsgrp-goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: 
fsgrp-goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + fsGroup: 32 diff --git a/other-cel/require-non-root-groups/artifacthub-pkg.yml b/other-cel/require-non-root-groups/artifacthub-pkg.yml new file mode 100644 index 000000000..6c9a54a0e --- /dev/null +++ b/other-cel/require-non-root-groups/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: require-non-root-groups-cel +version: 1.0.0 +displayName: Require Non-Root Groups in CEL expressions +description: >- + Containers should be forbidden from running with a root primary or supplementary GID. This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number greater than zero (i.e., non root). A known issue prevents a policy such as this using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-non-root-groups/require-non-root-groups.yaml + ``` +keywords: + - kyverno + - Sample + - EKS Best Practices + - CEL Expressions +readme: | + Containers should be forbidden from running with a root primary or supplementary GID. This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number greater than zero (i.e., non root). A known issue prevents a policy such as this using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample, EKS Best Practices in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: b2f00c69719c2f91584551c65a0809e4d2d2e691030b41aa3bf80cdcb6e45320 +createdAt: "2024-05-19T10:49:49Z" diff --git a/other-cel/require-non-root-groups/require-non-root-groups.yaml b/other-cel/require-non-root-groups/require-non-root-groups.yaml new file mode 100644 index 000000000..4f0f77b7d --- /dev/null +++ b/other-cel/require-non-root-groups/require-non-root-groups.yaml @@ -0,0 +1,88 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-non-root-groups + annotations: + policies.kyverno.io/title: Require Non-Root Groups in CEL expressions + policies.kyverno.io/category: Sample, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Containers should be forbidden from running with a root primary or supplementary GID. + This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number + greater than zero (i.e., non root). A known issue prevents a policy such as this + using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-runasgroup + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? 
object.spec.ephemeralContainers : []))" + expressions: + - expression: >- + ( + has(object.spec.securityContext) && has(object.spec.securityContext.runAsGroup) && (object.spec.securityContext.runAsGroup > 0) && + variables.allContainers.all(container, !has(container.securityContext) || !has(container.securityContext.runAsGroup) || container.securityContext.runAsGroup > 0) + ) || + ( + variables.allContainers.all(container, has(container.securityContext) && has(container.securityContext.runAsGroup) && container.securityContext.runAsGroup > 0) + ) + message: >- + Running with root group IDs is disallowed. The fields + spec.securityContext.runAsGroup, spec.containers[*].securityContext.runAsGroup, + spec.initContainers[*].securityContext.runAsGroup, and + spec.ephemeralContainers[*].securityContext.runAsGroup must be + set to a value greater than zero. + - name: check-supplementalgroups + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.securityContext) || !has(object.spec.securityContext.supplementalGroups) || + object.spec.securityContext.supplementalGroups.all(group, group > 0) + message: >- + Containers cannot run with a root primary or supplementary GID. The field + spec.securityContext.supplementalGroups must be unset or + set to a value greater than zero. + - name: check-fsgroup + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.securityContext) || !has(object.spec.securityContext.fsGroup) || + object.spec.securityContext.fsGroup > 0 + message: >- + Containers cannot run with a root primary or supplementary GID. The field + spec.securityContext.fsGroup must be unset or set to a value greater than zero. 
+ diff --git a/other-cel/require-pod-priorityclassname/artifacthub-pkg.yml b/other-cel/require-pod-priorityclassname/artifacthub-pkg.yml index 20700714c..834899b20 100644 --- a/other-cel/require-pod-priorityclassname/artifacthub-pkg.yml +++ b/other-cel/require-pod-priorityclassname/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Multi-Tenancy, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: dad8ffaa48df075faa27733ac7a6a88ac644864bc1a1a5693ddb443775148279 +digest: 10070a8c58969454fde8742cc3c1fdd5c196d98a918e8504f833331dd0a1c03b createdAt: "2024-04-11T17:46:06Z" diff --git a/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.yaml b/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.yaml index f8a37b3a0..0e9be3f26 100644 --- a/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.yaml +++ b/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/require-qos-burstable/artifacthub-pkg.yml b/other-cel/require-qos-burstable/artifacthub-pkg.yml index f80147354..78d48eb08 100644 --- a/other-cel/require-qos-burstable/artifacthub-pkg.yml +++ b/other-cel/require-qos-burstable/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Other, Multi-Tenancy in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 3e93eb4eee26bb198811f9441bca6ef58dfb848e6446ccb38156b534a16fe16b +digest: 723f2fd7dcafa80eb362274960a518a13eecc425a96880ef690b7693496cc967 createdAt: "2024-04-11T17:54:50Z" diff --git a/other-cel/require-qos-burstable/require-qos-burstable.yaml b/other-cel/require-qos-burstable/require-qos-burstable.yaml index 329007c28..208d272b1 100644 --- a/other-cel/require-qos-burstable/require-qos-burstable.yaml +++ 
b/other-cel/require-qos-burstable/require-qos-burstable.yaml @@ -27,6 +27,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/chainsaw-test.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..3a4e08816 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: require-qos-guaranteed +spec: + steps: + - name: step-01 + try: + - apply: + file: ../require-qos-guaranteed.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: require-qos-guaranteed + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/pod-bad.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..9cd0cbc6a --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + cpu: "2" + - name: 
busybox-again + image: busybox:1.35 + resources: + limits: + memory: "100Mi" + cpu: "1" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + - name: busybox-again + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + - name: busybox-again + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/pod-good.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..f9ae97536 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/pod-good.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 + labels: + app: myapp +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + cpu: "2" + limits: + memory: "50Mi" + cpu: "2" + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-bad.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..7a363df17 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: busybox + image: 
busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + cpu: "1" + - name: busybox-again + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: busybox + image: busybox:1.35 + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + limits: + cpu: "2" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-good.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..b9d919b36 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "200Mi" + cpu: "2" + limits: + memory: "200Mi" + cpu: "2" + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + cpu: "1" + limits: + memory: "100Mi" + cpu: "1" + - name: busybox-again + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + cpu: "0.5" + limits: + memory: "50Mi" + cpu: "0.5" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/.chainsaw-test/policy-ready.yaml b/other-cel/require-qos-guaranteed/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..7d014a1b1 --- /dev/null +++ 
b/other-cel/require-qos-guaranteed/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-qos-guaranteed +status: + ready: true diff --git a/other-cel/require-qos-guaranteed/.kyverno-test/kyverno-test.yaml b/other-cel/require-qos-guaranteed/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..b11a7e2e1 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,24 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-qos-guaranteed +policies: +- ../require-qos-guaranteed.yaml +resources: +- resource.yaml +results: +- kind: Pod + policy: require-qos-guaranteed + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail + rule: guaranteed +- kind: Pod + policy: require-qos-guaranteed + resources: + - goodpod01 + - goodpod02 + result: pass + rule: guaranteed diff --git a/other-cel/require-qos-guaranteed/.kyverno-test/resource.yaml b/other-cel/require-qos-guaranteed/.kyverno-test/resource.yaml new file mode 100644 index 000000000..105a8d2a5 --- /dev/null +++ b/other-cel/require-qos-guaranteed/.kyverno-test/resource.yaml @@ -0,0 +1,97 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" + - name: seconddemo + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "300Mi" + cpu: "500m" + requests: + memory: "300Mi" + cpu: "500m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "400Mi" 
+ cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "900m" + requests: + memory: "200Mi" + cpu: "700m" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: qos-demo-ctr + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" + - name: secondname + image: thisdoesnotexist:1.1.1 + resources: + limits: + memory: "200Mi" + cpu: "800m" + requests: + memory: "200Mi" + cpu: "700m" \ No newline at end of file diff --git a/other-cel/require-qos-guaranteed/artifacthub-pkg.yml b/other-cel/require-qos-guaranteed/artifacthub-pkg.yml new file mode 100644 index 000000000..047d7ef7e --- /dev/null +++ b/other-cel/require-qos-guaranteed/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: require-qos-guaranteed-cel +version: 1.0.0 +displayName: Require QoS Guaranteed in CEL expressions +description: >- + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain priority guarantees based upon the resources they define. When Pods define both requests and limits for both memory and CPU, and the requests and limits are equal to each other, Kubernetes grants the QoS class as guaranteed which allows them to run at a higher priority than others. This policy requires that all containers within a Pod run with this definition resulting in a guaranteed QoS. This policy is provided with the intention that users will need to control its scope by using exclusions, preconditions, and other policy language mechanisms. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml + ``` +keywords: + - kyverno + - Other + - Multi-Tenancy + - CEL Expressions +readme: | + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain priority guarantees based upon the resources they define. When Pods define both requests and limits for both memory and CPU, and the requests and limits are equal to each other, Kubernetes grants the QoS class as guaranteed which allows them to run at a higher priority than others. This policy requires that all containers within a Pod run with this definition resulting in a guaranteed QoS. This policy is provided with the intention that users will need to control its scope by using exclusions, preconditions, and other policy language mechanisms. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other, Multi-Tenancy in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 5f7ee8a0d9f33c875ac26940425f5cc12ac8f528ea6cf233df2b4c79ed5ae43d +createdAt: "2024-05-19T11:06:21Z" diff --git a/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml b/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml new file mode 100644 index 000000000..485ddfbcc --- /dev/null +++ b/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml @@ -0,0 +1,47 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-qos-guaranteed + annotations: + policies.kyverno.io/title: Require QoS Guaranteed in CEL expressions + policies.kyverno.io/category: Other, Multi-Tenancy in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Pod Quality of Service (QoS) is a 
mechanism to ensure Pods receive certain + priority guarantees based upon the resources they define. When Pods define both + requests and limits for both memory and CPU, and the requests and limits are equal + to each other, Kubernetes grants the QoS class as guaranteed which allows them to run + at a higher priority than others. This policy requires that all containers within a Pod + run with this definition resulting in a guaranteed QoS. This policy is provided with the + intention that users will need to control its scope by using + exclusions, preconditions, and other policy language mechanisms. +spec: + validationFailureAction: Audit + background: true + rules: + - name: guaranteed + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + has(container.resources) && + has(container.resources.requests) && + has(container.resources.requests.cpu) && has(container.resources.requests.memory) && + has(container.resources.limits) && + has(container.resources.limits.cpu) && has(container.resources.limits.memory) && + container.resources.requests.cpu == container.resources.limits.cpu && + container.resources.requests.memory == container.resources.limits.memory) + message: "All containers must define memory and CPU requests and limits where they are equal." 
+ diff --git a/other-cel/require-storageclass/artifacthub-pkg.yml b/other-cel/require-storageclass/artifacthub-pkg.yml index b12cc6d8e..4af61aff5 100644 --- a/other-cel/require-storageclass/artifacthub-pkg.yml +++ b/other-cel/require-storageclass/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Other, Multi-Tenancy in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "PersistentVolumeClaim, StatefulSet" -digest: 3ff19239688cf47b624bbd62d02624153b27059c6bed26ca290599eb2810ccf6 +digest: 50a19cfd04cb3ffc6cd1d064516042035b64213e16a85aecc891dc5c0806963c createdAt: "2024-04-11T18:06:16Z" diff --git a/other-cel/require-storageclass/require-storageclass.yaml b/other-cel/require-storageclass/require-storageclass.yaml index ff0240ab2..c7462d487 100644 --- a/other-cel/require-storageclass/require-storageclass.yaml +++ b/other-cel/require-storageclass/require-storageclass.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - PersistentVolumeClaim + operations: + - CREATE + - UPDATE validate: cel: expressions: @@ -36,6 +39,9 @@ spec: - resources: kinds: - StatefulSet + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-annotations/artifacthub-pkg.yml b/other-cel/restrict-annotations/artifacthub-pkg.yml index 3a789bbff..7bf40797d 100644 --- a/other-cel/restrict-annotations/artifacthub-pkg.yml +++ b/other-cel/restrict-annotations/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod, Annotation" -digest: 55b4953e1ca6aa6038d407aae539705d9a8c1136d2ffc277df1686a08ac7c9a8 +digest: 5e3188460d595814af4a9287d6af5e42819863d4b619bfc329effb6127c8bf94 createdAt: "2024-04-12T15:55:04Z" diff --git a/other-cel/restrict-annotations/restrict-annotations.yaml b/other-cel/restrict-annotations/restrict-annotations.yaml index 84f7df771..6baf71410 100644 --- a/other-cel/restrict-annotations/restrict-annotations.yaml +++ 
b/other-cel/restrict-annotations/restrict-annotations.yaml @@ -29,6 +29,9 @@ spec: - StatefulSet - DaemonSet - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-binding-clusteradmin/artifacthub-pkg.yml b/other-cel/restrict-binding-clusteradmin/artifacthub-pkg.yml index 68478d905..b97c83f97 100644 --- a/other-cel/restrict-binding-clusteradmin/artifacthub-pkg.yml +++ b/other-cel/restrict-binding-clusteradmin/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Security in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "RoleBinding, ClusterRoleBinding, RBAC" -digest: 313ef33ca553700424ca8c1dde5572219a1ef976a78138e78a6c5838f8a11d2c +digest: 7affbe90144f7d95e86ec9be12e95542296020026dd561cf79cd508b7dbb663d createdAt: "2024-04-12T16:00:17Z" diff --git a/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.yaml b/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.yaml index ef831d656..fd201e13b 100644 --- a/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.yaml +++ b/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.yaml @@ -26,6 +26,9 @@ spec: kinds: - RoleBinding - ClusterRoleBinding + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-binding-system-groups/artifacthub-pkg.yml b/other-cel/restrict-binding-system-groups/artifacthub-pkg.yml index b3f76a92b..a6908e5bc 100644 --- a/other-cel/restrict-binding-system-groups/artifacthub-pkg.yml +++ b/other-cel/restrict-binding-system-groups/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Security, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "RoleBinding, ClusterRoleBinding, RBAC" -digest: 9843fd8b6e7357bc01ccbfcd3280bc3bc1d8baa5da4dce46c7d0125906a8efdc +digest: 8a5fb4bfe55c063b3b14eaed7a81512548ce89cc7057aa5549723fefed670f1f createdAt: "2024-04-12T16:28:28Z" diff --git 
a/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.yaml b/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.yaml index 1b9e1a91b..621f9fc7b 100644 --- a/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.yaml +++ b/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.yaml @@ -26,6 +26,9 @@ spec: kinds: - RoleBinding - ClusterRoleBinding + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-clusterrole-nodesproxy/artifacthub-pkg.yml b/other-cel/restrict-clusterrole-nodesproxy/artifacthub-pkg.yml index 1234f553d..ee8e3bd59 100644 --- a/other-cel/restrict-clusterrole-nodesproxy/artifacthub-pkg.yml +++ b/other-cel/restrict-clusterrole-nodesproxy/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "ClusterRole, RBAC" -digest: 5c78dc50201f3223c42e0ac414e23dcc418f487ae76031aa85eb4fbd6fa1a2c1 +digest: 070dd3d53f7c50f1cdbb48643fc69d73ba1af9766f5eba3809e42058d72f885c createdAt: "2024-04-13T16:12:56Z" diff --git a/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.yaml b/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.yaml index 194bb5ff4..65e835a45 100644 --- a/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.yaml +++ b/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.yaml @@ -27,6 +27,9 @@ spec: - resources: kinds: - ClusterRole + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-controlplane-scheduling/artifacthub-pkg.yml b/other-cel/restrict-controlplane-scheduling/artifacthub-pkg.yml index 4cdf099f3..66814ad24 100644 --- a/other-cel/restrict-controlplane-scheduling/artifacthub-pkg.yml +++ b/other-cel/restrict-controlplane-scheduling/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" 
kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 55668531abc1c98763bcfac82efd6bb912c37fbf5b0f8cf68dc09fd0c24b6ce9 +digest: 95c237c7d39fa64b37cf5708d566ca582f7f55770708092cf1e38d0a4e8a0828 createdAt: "2024-04-13T16:19:01Z" diff --git a/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.yaml b/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.yaml index 41e83b84d..7a5a4e2d7 100644 --- a/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.yaml +++ b/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-deprecated-registry/artifacthub-pkg.yml b/other-cel/restrict-deprecated-registry/artifacthub-pkg.yml index 9862688b9..fe1cd7447 100644 --- a/other-cel/restrict-deprecated-registry/artifacthub-pkg.yml +++ b/other-cel/restrict-deprecated-registry/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Best Practices, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.27-1.28" kyverno/subject: "Pod" -digest: 8973c813a1c65f2137273d66cd871bfbe53d80b30bae87685713ddc1ce96eb32 +digest: b7e1108f954b94f8de8d26c564d37e1a6930648c9bb725ac2d3d3b6456d2ea2d createdAt: "2024-04-13T16:21:40Z" diff --git a/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.yaml b/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.yaml index 3102410a4..23f3168e6 100644 --- a/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.yaml +++ b/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/restrict-edit-for-endpoints/artifacthub-pkg.yml b/other-cel/restrict-edit-for-endpoints/artifacthub-pkg.yml new file mode 100644 index 
000000000..fb9ab8d60 --- /dev/null +++ b/other-cel/restrict-edit-for-endpoints/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-edit-for-endpoints-cel +version: 1.0.0 +displayName: Restrict Edit for Endpoints CVE-2021-25740 in CEL expressions +description: >- + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue defined in CVE-2021-25740 which could enable users to send network traffic to locations they would otherwise not have access to via a confused deputy attack. This was due to the system:aggregate-to-edit ClusterRole having edit permission of Endpoints. This policy, intended to run in background mode, checks if your cluster is vulnerable to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have the edit permission of Endpoints. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml + ``` +keywords: + - kyverno + - Security + - CEL Expressions +readme: | + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue defined in CVE-2021-25740 which could enable users to send network traffic to locations they would otherwise not have access to via a confused deputy attack. This was due to the system:aggregate-to-edit ClusterRole having edit permission of Endpoints. This policy, intended to run in background mode, checks if your cluster is vulnerable to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have the edit permission of Endpoints. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Security in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "ClusterRole" +digest: 1744f09b521d94f2d72bd0d7f50986ccd07cc90a9f66dbbdbaa985ca8c8e5b7e +createdAt: "2024-05-19T14:59:05Z" diff --git a/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml b/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml new file mode 100644 index 000000000..f1539a014 --- /dev/null +++ b/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml @@ -0,0 +1,39 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-edit-for-endpoints + annotations: + policies.kyverno.io/title: Restrict Edit for Endpoints CVE-2021-25740 in CEL expressions + policies.kyverno.io/category: Security in CEL + policies.kyverno.io/severity: low + policies.kyverno.io/subject: ClusterRole + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue + defined in CVE-2021-25740 which could enable users to send network traffic to locations + they would otherwise not have access to via a confused deputy attack. This was due to + the system:aggregate-to-edit ClusterRole having edit permission of Endpoints. + This policy, intended to run in background mode, checks if your cluster is vulnerable + to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have + the edit permission of Endpoints. 
+spec: + validationFailureAction: Audit + background: true + rules: + - name: system-aggregate-to-edit-check + match: + any: + - resources: + kinds: + - ClusterRole + names: + - system:aggregate-to-edit + validate: + cel: + expressions: + - expression: "!object.rules.exists(rule, 'endpoints' in rule.resources && 'edit' in rule.verbs)" + message: >- + This cluster may still be vulnerable to CVE-2021-25740. The system:aggregate-to-edit ClusterRole + should not have edit permission over Endpoints. + diff --git a/other-cel/restrict-escalation-verbs-roles/artifacthub-pkg.yml b/other-cel/restrict-escalation-verbs-roles/artifacthub-pkg.yml index ae1cb4bef..05f5dd53a 100644 --- a/other-cel/restrict-escalation-verbs-roles/artifacthub-pkg.yml +++ b/other-cel/restrict-escalation-verbs-roles/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Security in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Role, ClusterRole, RBAC" -digest: 145bfa9745d524e77c11d35ea267c3c2323eb6d9d13c3b7c00632eb358da7d75 +digest: 44c62b5989a9e99a591a95db11463125b7a8c0ad172e08881e527cebb3423293 createdAt: "2024-04-14T15:40:58Z" diff --git a/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.yaml b/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.yaml index 0bbe51474..3191b9904 100644 --- a/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.yaml +++ b/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.yaml @@ -25,6 +25,9 @@ spec: kinds: - Role - ClusterRole + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/restrict-ingress-classes/artifacthub-pkg.yml b/other-cel/restrict-ingress-classes/artifacthub-pkg.yml index ce6b364d9..7c72cc146 100644 --- a/other-cel/restrict-ingress-classes/artifacthub-pkg.yml +++ b/other-cel/restrict-ingress-classes/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" 
kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Ingress" -digest: 1619d89eec3ee9bf4de2f76b747b01c885af2c3b42631c3b0ff1e50a6717e9e1 +digest: abbd493cdcdfd2a7ec903027a4b7f56b1d7761e7a0ce2822d1521bb853791455 createdAt: "2024-04-14T15:43:33Z" diff --git a/other-cel/restrict-ingress-classes/restrict-ingress-classes.yaml b/other-cel/restrict-ingress-classes/restrict-ingress-classes.yaml index f448f323b..ee46fccea 100644 --- a/other-cel/restrict-ingress-classes/restrict-ingress-classes.yaml +++ b/other-cel/restrict-ingress-classes/restrict-ingress-classes.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-ingress-defaultbackend/artifacthub-pkg.yml b/other-cel/restrict-ingress-defaultbackend/artifacthub-pkg.yml index 04342b17f..7374d591a 100644 --- a/other-cel/restrict-ingress-defaultbackend/artifacthub-pkg.yml +++ b/other-cel/restrict-ingress-defaultbackend/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Ingress" -digest: daed0d8b9b07fa2d102540d7f7204752cc44ace565668469b66ac2d224c87ca4 +digest: b4e07522bb17d990d112a2ba7a472c9662be01358fd8caa9806186246ffa7521 createdAt: "2024-04-14T15:45:57Z" diff --git a/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.yaml b/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.yaml index 96bbb1fa8..af50efc35 100644 --- a/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.yaml +++ b/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.yaml @@ -27,6 +27,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-ingress-wildcard/artifacthub-pkg.yml b/other-cel/restrict-ingress-wildcard/artifacthub-pkg.yml index 30e88d18c..7e909e5a3 100644 --- 
a/other-cel/restrict-ingress-wildcard/artifacthub-pkg.yml +++ b/other-cel/restrict-ingress-wildcard/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Ingress" -digest: efc9d13e01f12ada9ae55b8cd4ef572c767670d65ca189bdea4857969a3a7365 +digest: 74fa42f42b40f259054e0a4c097e10673bfa977ef0f5451cef18d07222142a5b createdAt: "2024-04-15T18:06:41Z" diff --git a/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.yaml b/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.yaml index 7cc5de6a2..3eded4e68 100644 --- a/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.yaml +++ b/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-jobs/.chainsaw-test/chainsaw-test.yaml b/other-cel/restrict-jobs/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..0537560b5 --- /dev/null +++ b/other-cel/restrict-jobs/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-jobs +spec: + steps: + - name: step-01 + try: + - apply: + file: ../restrict-jobs.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + expect: + - check: + ($error != null): true + file: jobs-bad.yaml + - apply: + file: cronjobs-good.yaml diff --git a/other-cel/restrict-jobs/.chainsaw-test/cronjobs-good.yaml b/other-cel/restrict-jobs/.chainsaw-test/cronjobs-good.yaml new file mode 100644 index 000000000..cc0f70f29 --- /dev/null +++ b/other-cel/restrict-jobs/.chainsaw-test/cronjobs-good.yaml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: 
goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/restrict-jobs/.chainsaw-test/jobs-bad.yaml b/other-cel/restrict-jobs/.chainsaw-test/jobs-bad.yaml new file mode 100644 index 000000000..700346ed5 --- /dev/null +++ b/other-cel/restrict-jobs/.chainsaw-test/jobs-bad.yaml @@ -0,0 +1,12 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: badjob +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never \ No newline at end of file diff --git a/kasten/k10-minimum-retention/.chainsaw-test/policy-ready.yaml b/other-cel/restrict-jobs/.chainsaw-test/policy-ready.yaml similarity index 81% rename from kasten/k10-minimum-retention/.chainsaw-test/policy-ready.yaml rename to other-cel/restrict-jobs/.chainsaw-test/policy-ready.yaml index 99fd5a77e..118a84bec 100644 --- a/kasten/k10-minimum-retention/.chainsaw-test/policy-ready.yaml +++ b/other-cel/restrict-jobs/.chainsaw-test/policy-ready.yaml @@ -1,7 +1,7 @@ apiVersion: kyverno.io/v1 kind: ClusterPolicy metadata: - name: k10-minimum-retention + name: restrict-jobs status: conditions: - reason: Succeeded diff --git a/other-cel/restrict-jobs/.kyverno-test/kyverno-test.yaml b/other-cel/restrict-jobs/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c2e9af736 --- /dev/null +++ b/other-cel/restrict-jobs/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: restrict-jobs +policies: +- ../restrict-jobs.yaml +resources: +- resource.yaml +results: +- policy: restrict-jobs + rule: restrict-job-from-cronjob + kind: Job + resources: + - badjob + result: fail +- policy: restrict-jobs + rule: restrict-job-from-cronjob + kind: Job + resources: + - goodjob + result: skip diff --git 
a/other-cel/restrict-jobs/.kyverno-test/resource.yaml b/other-cel/restrict-jobs/.kyverno-test/resource.yaml new file mode 100644 index 000000000..6e48e4443 --- /dev/null +++ b/other-cel/restrict-jobs/.kyverno-test/resource.yaml @@ -0,0 +1,32 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: badjob +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: goodjob + ownerReferences: + - apiVersion: batch/v1 + blockOwnerDeletion: true + controller: true + kind: CronJob + name: goodcronjob01 + uid: a554d6b8-8b0a-44da-a9d9-d76a1f85b320 +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never \ No newline at end of file diff --git a/other-cel/restrict-jobs/artifacthub-pkg.yml b/other-cel/restrict-jobs/artifacthub-pkg.yml new file mode 100644 index 000000000..1f1b05fb6 --- /dev/null +++ b/other-cel/restrict-jobs/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-jobs-cel +version: 1.0.0 +displayName: Restrict Jobs in CEL expressions +description: >- + Jobs can be created directly and indirectly via a CronJob controller. In some cases, users may want to only allow Jobs if they are created via a CronJob. This policy restricts Jobs so they may only be created by a CronJob. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-jobs/restrict-jobs.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Jobs can be created directly and indirectly via a CronJob controller. In some cases, users may want to only allow Jobs if they are created via a CronJob. This policy restricts Jobs so they may only be created by a CronJob. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Job" +digest: d8806389e8dd3e8ed5a2fe1a38fe4e4dec080af79d2cca7e684ddb46f244c6be +createdAt: "2024-05-19T16:05:23Z" diff --git a/other-cel/restrict-jobs/restrict-jobs.yaml b/other-cel/restrict-jobs/restrict-jobs.yaml new file mode 100644 index 000000000..13b836675 --- /dev/null +++ b/other-cel/restrict-jobs/restrict-jobs.yaml @@ -0,0 +1,33 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-jobs + annotations: + policies.kyverno.io/title: Restrict Jobs in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Job + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Jobs can be created directly and indirectly via a CronJob controller. + In some cases, users may want to only allow Jobs if they are created via a CronJob. + This policy restricts Jobs so they may only be created by a CronJob. +spec: + validationFailureAction: Enforce + rules: + - name: restrict-job-from-cronjob + match: + any: + - resources: + kinds: + - Job + celPreconditions: + - name: "not-created-by-cronjob" + expression: "!has(object.metadata.ownerReferences) || object.metadata.ownerReferences[0].kind != 'CronJob'" + validate: + cel: + expressions: + - expression: "false" + message: Jobs are only allowed if spawned from CronJobs. 
+ diff --git a/other-cel/restrict-loadbalancer/artifacthub-pkg.yml b/other-cel/restrict-loadbalancer/artifacthub-pkg.yml index 27245180d..5917a62f1 100644 --- a/other-cel/restrict-loadbalancer/artifacthub-pkg.yml +++ b/other-cel/restrict-loadbalancer/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Service" -digest: 33b5031b68eb2f05d6dc535516fff514947846c6b64b1944e1546c897afae750 +digest: 2b6dd5c292505f25dd5074052ea247c8febd8686067215033097f045cf8bbe0b createdAt: "2024-04-17T17:49:00Z" diff --git a/other-cel/restrict-loadbalancer/restrict-loadbalancer.yaml b/other-cel/restrict-loadbalancer/restrict-loadbalancer.yaml index 3d68595b9..08b7cb558 100644 --- a/other-cel/restrict-loadbalancer/restrict-loadbalancer.yaml +++ b/other-cel/restrict-loadbalancer/restrict-loadbalancer.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Service + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-networkpolicy-empty-podselector/artifacthub-pkg.yml b/other-cel/restrict-networkpolicy-empty-podselector/artifacthub-pkg.yml index 6ff9f7a40..e130a25be 100644 --- a/other-cel/restrict-networkpolicy-empty-podselector/artifacthub-pkg.yml +++ b/other-cel/restrict-networkpolicy-empty-podselector/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Other, Multi-Tenancy in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "NetworkPolicy" -digest: 3eeb200fc6a3efbfd7855a39e6c39d4d1a9222435b02a81b871a9da523012c63 +digest: c55047723a696dfb02a59fb2d933edabdb4796436c55587588d5a9c40ee08e2c createdAt: "2024-04-17T17:51:58Z" diff --git a/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.yaml b/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.yaml index ac560629a..2c4605a1f 100644 --- 
a/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.yaml +++ b/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - NetworkPolicy + operations: + - CREATE + - UPDATE exclude: any: - resources: diff --git a/other-cel/restrict-node-affinity/artifacthub-pkg.yml b/other-cel/restrict-node-affinity/artifacthub-pkg.yml index 74c9ede34..57eb45451 100644 --- a/other-cel/restrict-node-affinity/artifacthub-pkg.yml +++ b/other-cel/restrict-node-affinity/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 333f074f29fd324ac79a1fc9a191d39f73cb83d8c2d17f0dbde5668f959584f3 +digest: a148811b16c64d0d77e4d14b4cc368acd90a186276801c2dc4cc7ce4f0fb9b98 createdAt: "2024-04-18T18:08:24Z" diff --git a/other-cel/restrict-node-affinity/restrict-node-affinity.yaml b/other-cel/restrict-node-affinity/restrict-node-affinity.yaml index 0cd3c3d84..fbad132dc 100644 --- a/other-cel/restrict-node-affinity/restrict-node-affinity.yaml +++ b/other-cel/restrict-node-affinity/restrict-node-affinity.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-node-label-creation/.chainsaw-test/chainsaw-test.yaml b/other-cel/restrict-node-label-creation/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..7174c36d8 --- /dev/null +++ b/other-cel/restrict-node-label-creation/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,35 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-node-label-creation +spec: + steps: + - name: step-01 + try: + - script: + content: | + kubectl get configmap 
kyverno -n kyverno -o yaml | sed 's/\[Node\/\*,\*,\*\]//g' - | sed 's/\[Node,\*,\*\]//g' - | kubectl apply -f - + - sleep: + duration: 5s + - name: step-02 + try: + - apply: + file: ../restrict-node-label-creation.yaml + - assert: + file: policy-ready.yaml + - name: step-03 + try: + - script: + content: | + node=$(kubectl get nodes --no-headers | awk 'NR==1{print $1}') + if kubectl label --overwrite nodes $node foo=bar; then echo "Failure: successfully set label foo"; exit 1; else echo "Success: failed to set label foo"; fi + if kubectl label --overwrite nodes $node bar=bar; then echo "Success: set label bar"; else echo "Failed to set label bar"; exit 1; fi + if kubectl label --overwrite nodes $node bar=foo; then echo "Success: modified label bar"; else echo "Failed to modify label bar"; exit 1; fi + if kubectl label nodes $node bar-; then echo "Success: removed label bar"; else echo "Failed to remove label bar"; exit 1; fi + - name: step-04 + try: + - script: + content: | + kubectl get configmap -n kyverno kyverno -o yaml | sed 's/\[APIService,\*,\*\]/\[Node,\*,\*\] \[Node\/\*,\*,\*\] \[APIService,\*,\*\]/g' - | kubectl apply -f - diff --git a/other-cel/restrict-node-label-creation/.chainsaw-test/policy-ready.yaml b/other-cel/restrict-node-label-creation/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..dd8579329 --- /dev/null +++ b/other-cel/restrict-node-label-creation/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,9 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-node-label-creation +status: + conditions: + - reason: Succeeded + status: "True" + type: Ready \ No newline at end of file diff --git a/other-cel/restrict-node-label-creation/artifacthub-pkg.yml b/other-cel/restrict-node-label-creation/artifacthub-pkg.yml new file mode 100644 index 000000000..98c6f363b --- /dev/null +++ b/other-cel/restrict-node-label-creation/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-node-label-creation-cel +version: 1.0.0 
+displayName: Restrict node label creation in CEL expressions
+description: >-
+  Node labels are critical pieces of metadata upon which many other applications and logic may depend and should not be altered or removed by regular users. Many cloud providers also use Node labels to signal specific functions to applications. This policy prevents setting of a new label called `foo` on cluster Nodes. Use of this policy requires removal of the Node resource filter in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy requires, at minimum, one of the following versions of Kubernetes: v1.18.18, v1.19.10, v1.20.6, or v1.21.0.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml
+  ```
+keywords:
+  - kyverno
+  - Sample
+  - CEL Expressions
+readme: |
+  Node labels are critical pieces of metadata upon which many other applications and logic may depend and should not be altered or removed by regular users. Many cloud providers also use Node labels to signal specific functions to applications. This policy prevents setting of a new label called `foo` on cluster Nodes. Use of this policy requires removal of the Node resource filter in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy requires, at minimum, one of the following versions of Kubernetes: v1.18.18, v1.19.10, v1.20.6, or v1.21.0. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Node, Label" +digest: 688f42a4211a49dd6f743e2e302654447b9e27d8da63cb5547201be85cbb783b +createdAt: "2024-05-20T03:52:11Z" diff --git a/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml b/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml new file mode 100644 index 000000000..5a51e0975 --- /dev/null +++ b/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml @@ -0,0 +1,40 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-node-label-creation + annotations: + policies.kyverno.io/title: Restrict node label creation in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/subject: Node, Label + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Node labels are critical pieces of metadata upon which many other applications and + logic may depend and should not be altered or removed by regular users. Many cloud + providers also use Node labels to signal specific functions to applications. + This policy prevents setting of a new label called `foo` on + cluster Nodes. Use of this policy requires removal of the Node resource filter + in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy + requires, at minimum, one of the following versions of Kubernetes: + v1.18.18, v1.19.10, v1.20.6, or v1.21.0. 
+spec: + validationFailureAction: Enforce + background: false + rules: + - name: prevent-label-set + match: + any: + - resources: + kinds: + - Node + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + - name: "has-foo-label" + expression: "has(object.metadata.labels) && 'foo' in object.metadata.labels" + validate: + cel: + expressions: + - expression: "false" + message: "Setting the `foo` label on a Node is not allowed." + diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/chainsaw-test.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..d8398690b --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,50 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: restrict-pod-controller-serviceaccount-updates +spec: + steps: + - name: step-01 + try: + - apply: + file: ../restrict-pod-controller-serviceaccount-updates.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: restrict-pod-controller-serviceaccount-updates + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ns.yaml + - apply: + file: sa-01.yaml + - apply: + file: sa-02.yaml + - apply: + file: deployment.yaml + - apply: + file: cronjob.yaml + - name: step-03 + try: + - apply: + expect: + - check: + ($error != null): true + file: cronjob-bad-update.yaml + - apply: + expect: + - check: + ($error != null): true + file: deploy-bad-update.yaml + - apply: + file: cronjob-good-update.yaml + - apply: + file: deploy-good-update.yaml diff --git 
a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-bad-update.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-bad-update.yaml new file mode 100644 index 000000000..035cb6a3c --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-bad-update.yaml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: cronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + serviceAccountName: serviceaccount02 + containers: + - name: busybox + image: busybox:1.35 + restartPolicy: OnFailure \ No newline at end of file diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-good-update.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-good-update.yaml new file mode 100644 index 000000000..004731f65 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob-good-update.yaml @@ -0,0 +1,11 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: cronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: Never \ No newline at end of file diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob.yaml new file mode 100755 index 000000000..5e04c53ee --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/cronjob.yaml @@ -0,0 +1,15 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: cronjob01 +spec: + jobTemplate: + spec: + template: + spec: + containers: + - image: busybox:1.35 + name: busybox + restartPolicy: OnFailure + serviceAccountName: serviceaccount01 + schedule: '* * * * *' diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-bad-update.yaml 
b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-bad-update.yaml new file mode 100644 index 000000000..8788cebd3 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-bad-update.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: deployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + serviceAccountName: serviceaccount02 + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-good-update.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-good-update.yaml new file mode 100644 index 000000000..f100a5052 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deploy-good-update.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: deployment01 +spec: + template: + spec: + restartPolicy: Always \ No newline at end of file diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-7.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deployment.yaml similarity index 63% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-7.yaml rename to other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deployment.yaml index 68a62ce5f..b8f88a8ad 100755 --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-7.yaml +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/deployment.yaml @@ -1,17 +1,15 @@ apiVersion: apps/v1 -kind: StatefulSet +kind: Deployment metadata: labels: - dataprotection: k10-simplepolicy - purpose: production - name: ss02 - namespace: k10-gp-ns03 
+ app: busybox + name: deployment01 spec: replicas: 1 selector: matchLabels: app: busybox - serviceName: busybox-ss + strategy: {} template: metadata: labels: @@ -20,3 +18,4 @@ spec: containers: - image: busybox:1.35 name: busybox + serviceAccountName: serviceaccount01 diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/ns.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/ns.yaml new file mode 100755 index 000000000..e3688b96e --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: restrict-sa-ns diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/policy-ready.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..92b7018c8 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-pod-controller-serviceaccount-updates +status: + ready: true diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-01.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-01.yaml new file mode 100755 index 000000000..71e72fad5 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-01.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: serviceaccount01 + namespace: restrict-sa-ns diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-02.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-02.yaml new file mode 100755 index 000000000..042c339a8 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/.chainsaw-test/sa-02.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 
+kind: ServiceAccount +metadata: + name: serviceaccount02 + namespace: restrict-sa-ns diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/artifacthub-pkg.yml b/other-cel/restrict-pod-controller-serviceaccount-updates/artifacthub-pkg.yml new file mode 100644 index 000000000..94461cb0c --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: restrict-pod-controller-serviceaccount-updates-cel +version: 1.0.0 +displayName: Restrict Pod Controller ServiceAccount Updates in CEL expressions +description: >- + ServiceAccounts which have the ability to edit/patch workloads which they created may potentially use that privilege to update to a different ServiceAccount with higher privileges. This policy, intended to be run in `enforce` mode, blocks updates to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods directly for this field are not possible as it is immutable once set. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + ServiceAccounts which have the ability to edit/patch workloads which they created may potentially use that privilege to update to a different ServiceAccount with higher privileges. This policy, intended to be run in `enforce` mode, blocks updates to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods directly for this field are not possible as it is immutable once set. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: a3447fb207a7640b2744570dbe01cc0816128a7b7e0776ac2febf6c5a4db0e77 +createdAt: "2024-05-20T04:20:28Z" diff --git a/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml b/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml new file mode 100644 index 000000000..81da09843 --- /dev/null +++ b/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml @@ -0,0 +1,59 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-pod-controller-serviceaccount-updates + annotations: + policies.kyverno.io/title: Restrict Pod Controller ServiceAccount Updates in CEL Expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: Medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + ServiceAccounts which have the ability to edit/patch workloads which they created + may potentially use that privilege to update to a different ServiceAccount with higher + privileges. This policy, intended to be run in `enforce` mode, blocks updates + to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods + directly for this field are not possible as it is immutable once set. 
+spec: + validationFailureAction: Audit + background: true + rules: + - name: block-serviceaccount-updates + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: >- + object.spec.template.spec.?serviceAccountName.orValue('empty') == oldObject.spec.template.spec.?serviceAccountName.orValue('empty') + message: >- + The serviceAccountName field may not be changed once created. + - name: block-serviceaccount-updates-cronjob + match: + any: + - resources: + kinds: + - CronJob + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: >- + object.spec.jobTemplate.spec.template.spec.?serviceAccountName.orValue('empty') == oldObject.spec.jobTemplate.spec.template.spec.?serviceAccountName.orValue('empty') + message: >- + The serviceAccountName field may not be changed once created. 
+ diff --git a/other-cel/restrict-sa-automount-sa-token/artifacthub-pkg.yml b/other-cel/restrict-sa-automount-sa-token/artifacthub-pkg.yml index 6681c477c..af52ba110 100644 --- a/other-cel/restrict-sa-automount-sa-token/artifacthub-pkg.yml +++ b/other-cel/restrict-sa-automount-sa-token/artifacthub-pkg.yml @@ -27,6 +27,6 @@ annotations: kyverno/category: "Security in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "ServiceAccount" -digest: 8df2f43e524f85ca4d1ba0b5034a821de452c8affb89c6177db70c53e016ad36 +digest: 3401afb861d2b9ca9d53ab5667ac1da3a32ba4af4a421accf65ed8448a63a6f2 createdAt: "2024-04-18T18:11:04Z" diff --git a/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.yaml b/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.yaml index 49e64b3fd..0ac25122f 100644 --- a/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.yaml +++ b/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - ServiceAccount + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-secret-role-verbs/artifacthub-pkg.yml b/other-cel/restrict-secret-role-verbs/artifacthub-pkg.yml index 1880c3791..36daa4920 100644 --- a/other-cel/restrict-secret-role-verbs/artifacthub-pkg.yml +++ b/other-cel/restrict-secret-role-verbs/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Security in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Role, ClusterRole, RBAC" -digest: 87450e9a836bbad32f36134630508ba015ccf9935723807e688cf1e865564163 +digest: b3da9edeb06922d1f3c79a86b009b7bb3f8f5970791fcc839569fd238dfda97b createdAt: "2024-04-19T16:41:34Z" diff --git a/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.yaml b/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.yaml index 62ffe4ea0..173e950a9 100644 --- 
a/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.yaml +++ b/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.yaml @@ -28,6 +28,9 @@ spec: kinds: - Role - ClusterRole + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/other-cel/restrict-secrets-by-name/artifacthub-pkg.yml b/other-cel/restrict-secrets-by-name/artifacthub-pkg.yml index 745aa5960..d1d1664ef 100644 --- a/other-cel/restrict-secrets-by-name/artifacthub-pkg.yml +++ b/other-cel/restrict-secrets-by-name/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod, Secret" -digest: 5ea208df44ec2a54705cc73244a374cb2593612182f9d4da8e17749e548e39ad +digest: 9a77d417ab9d59569a5e202ab0cdd73fc95d387b485be0003691b851e0065c50 createdAt: "2024-04-20T16:40:34Z" diff --git a/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.yaml b/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.yaml index 2f1403578..2ff8d8f8c 100644 --- a/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.yaml +++ b/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: @@ -43,6 +46,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: @@ -60,6 +66,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-service-port-range/artifacthub-pkg.yml b/other-cel/restrict-service-port-range/artifacthub-pkg.yml index 2ed9cf0b7..9869c5aab 100644 --- a/other-cel/restrict-service-port-range/artifacthub-pkg.yml +++ b/other-cel/restrict-service-port-range/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: kyverno/category: "Other in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Service" -digest: 
3813e2ff29ae51c28c1a9240be2893dc8a6f94e4229402012f587adbc4af7248 +digest: 76e5ca9f8c86c153ff8c31bf8dfe55ad665e0c6bbe3546c9e36edf515fee6965 createdAt: "2024-04-19T16:44:39Z" diff --git a/other-cel/restrict-service-port-range/restrict-service-port-range.yaml b/other-cel/restrict-service-port-range/restrict-service-port-range.yaml index 53da4a635..a037ed803 100644 --- a/other-cel/restrict-service-port-range/restrict-service-port-range.yaml +++ b/other-cel/restrict-service-port-range/restrict-service-port-range.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Service + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-storageclass/artifacthub-pkg.yml b/other-cel/restrict-storageclass/artifacthub-pkg.yml index 425918b25..3444d3c6a 100644 --- a/other-cel/restrict-storageclass/artifacthub-pkg.yml +++ b/other-cel/restrict-storageclass/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Other, Multi-Tenancy in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "StorageClass" -digest: d210e624e126e2174723ce0b5d7426c641afe8dd45bd79e0380b98714bbb2633 +digest: 1056e484a63b688c416b32d7141a6ab9bd4d46224e9836e96ea80a584a1b0ba4 createdAt: "2024-04-20T16:43:16Z" diff --git a/other-cel/restrict-storageclass/restrict-storageclass.yaml b/other-cel/restrict-storageclass/restrict-storageclass.yaml index 59acee934..0913a76fc 100644 --- a/other-cel/restrict-storageclass/restrict-storageclass.yaml +++ b/other-cel/restrict-storageclass/restrict-storageclass.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - StorageClass + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-usergroup-fsgroup-id/artifacthub-pkg.yml b/other-cel/restrict-usergroup-fsgroup-id/artifacthub-pkg.yml index 86f07618d..0719c1d5e 100644 --- a/other-cel/restrict-usergroup-fsgroup-id/artifacthub-pkg.yml +++ b/other-cel/restrict-usergroup-fsgroup-id/artifacthub-pkg.yml @@ -19,6 +19,6 @@ annotations: 
kyverno/category: "Sample in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 4bc42c97f4d88453876a46d9988365409012bf1d4c864edd90cb11b3779ba2c1 +digest: b709760f5d54a7e1720885c487c7f6ba5db404e0aed99dfabdc093206b42092c createdAt: "2024-04-20T16:57:00Z" diff --git a/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.yaml b/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.yaml index bedfdc433..13a57b963 100644 --- a/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.yaml +++ b/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-wildcard-resources/artifacthub-pkg.yml b/other-cel/restrict-wildcard-resources/artifacthub-pkg.yml index 3cb768843..1a402cedb 100644 --- a/other-cel/restrict-wildcard-resources/artifacthub-pkg.yml +++ b/other-cel/restrict-wildcard-resources/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Security, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "ClusterRole, Role, RBAC" -digest: 23a66b076b1ec1a18888c8eefd740e2fcbffd910a0621c0ec5bb99b056c95d6f +digest: 02918a02f88cd193f14914db60e99be721c738789e063eeb77efe8eb80e1e30c createdAt: "2024-04-21T15:05:39Z" diff --git a/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.yaml b/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.yaml index 49949d931..2a47461a4 100644 --- a/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.yaml +++ b/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.yaml @@ -27,6 +27,9 @@ spec: kinds: - Role - ClusterRole + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/restrict-wildcard-verbs/artifacthub-pkg.yml b/other-cel/restrict-wildcard-verbs/artifacthub-pkg.yml index 
4a867733a..72eb5a324 100644 --- a/other-cel/restrict-wildcard-verbs/artifacthub-pkg.yml +++ b/other-cel/restrict-wildcard-verbs/artifacthub-pkg.yml @@ -20,6 +20,6 @@ annotations: kyverno/category: "Security, EKS Best Practices in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Role, ClusterRole, RBAC" -digest: 6d79620ea475c5c6568760c38ce2803679fe0cb0792a5be4acfe9cdc9c2f45bd +digest: f94aaca4f8e88c242878b4c0ed47e5f3aaec1b1d05ffcb59b551f41a135bc7a7 createdAt: "2024-04-21T15:09:55Z" diff --git a/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.yaml b/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.yaml index 1a073022a..8aea74b48 100644 --- a/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.yaml +++ b/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.yaml @@ -27,6 +27,9 @@ spec: kinds: - Role - ClusterRole + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/chainsaw-test.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..d0c7d8295 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: topologyspreadconstraints-policy +spec: + steps: + - name: step-01 + try: + - apply: + file: ../topologyspreadconstraints-policy.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: topologyspreadconstraints-policy + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: podcontrollers-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontrollers-bad.yaml diff --git 
a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-bad.yaml new file mode 100644 index 000000000..c704109f2 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-bad.yaml @@ -0,0 +1,85 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy01 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy02 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeploy03 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-good.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-good.yaml new file mode 100644 index 000000000..9f9c9ad53 --- 
/dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/podcontrollers-good.yaml @@ -0,0 +1,91 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy01 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 3 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy02 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: busybox +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeploy03 + labels: + app: busybox +spec: + selector: + matchLabels: + app: busybox + replicas: 1 + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 \ No newline at end of file diff --git a/other-cel/topologyspreadconstraints-policy/.chainsaw-test/policy-ready.yaml b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..080e44efe --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.chainsaw-test/policy-ready.yaml 
@@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: topologyspreadconstraints-policy +status: + ready: true diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..d1e1c210e --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,33 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: topologyspreadconstraints +policies: +- ../topologyspreadconstraints-policy.yaml +resources: +- resource-fail1.yaml +- resource-fail2.yaml +- resource-fail3.yaml +- resource-pass.yaml +- resource-skip.yaml +results: +- kind: StatefulSet + policy: topologyspreadconstraints-policy + resources: + - monitoring/badss01 + - monitoring/badss02 + - monitoring/badss03 + result: fail + rule: spread-pods +- kind: StatefulSet + policy: topologyspreadconstraints-policy + resources: + - monitoring/goodss01 + result: pass + rule: spread-pods +- kind: StatefulSet + policy: topologyspreadconstraints-policy + resources: + - monitoring/skipss01 + result: skip + rule: spread-pods diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml new file mode 100644 index 000000000..065a47ff5 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: badss01 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + 
ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml new file mode 100644 index 000000000..0031995fc --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: badss02 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml new file mode 100644 index 000000000..d88b17bf3 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: badss03 + namespace: monitoring + labels: + app: thanos-memcached +spec: + 
selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml new file mode 100644 index 000000000..0310e6b00 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: goodss01 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 3 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml new file mode 100644 index 000000000..6761e7076 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml @@ 
-0,0 +1,52 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: skipss01 + namespace: monitoring + labels: + app: thanos-memcached +spec: + selector: + matchLabels: + app: thanos-memcached + serviceName: memcached + replicas: 1 + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: thanos-memcached + spec: + containers: + - name: memcached + image: memcached:1.6.17-alpine + command: + - memcached + - -m 2048 + - -o + - modern + - -v + ports: + - name: tcp-memcached + containerPort: 11211 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: foo.bar/test + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app: thanos-memcached diff --git a/other-cel/topologyspreadconstraints-policy/artifacthub-pkg.yml b/other-cel/topologyspreadconstraints-policy/artifacthub-pkg.yml new file mode 100644 index 000000000..3d251a745 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: topologyspreadconstraints-policy-cel +version: 1.0.0 +displayName: Spread Pods Across Nodes & Zones in CEL expressions +description: >- + Deployments to a Kubernetes cluster with multiple availability zones often need to distribute those replicas to align with those zones to ensure site-level failures do not impact availability. This policy ensures topologySpreadConstraints are defined, to spread pods over nodes and zones. Deployments or Statefulsets with less than 3 replicas are skipped. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml + ``` +keywords: + - kyverno + - Sample + - CEL Expressions +readme: | + Deployments to a Kubernetes cluster with multiple availability zones often need to distribute those replicas to align with those zones to ensure site-level failures do not impact availability. This policy ensures topologySpreadConstraints are defined, to spread pods over nodes and zones. Deployments or Statefulsets with less than 3 replicas are skipped. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Deployment, StatefulSet" +digest: bd9dae9c99706fe3d16d26f59bd1bb8ecdaf09ffb038d79e8906fb8c72ec3b0f +createdAt: "2024-04-29T15:49:11Z" + diff --git a/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml b/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml new file mode 100644 index 000000000..858bfb197 --- /dev/null +++ b/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml @@ -0,0 +1,44 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: topologyspreadconstraints-policy + annotations: + policies.kyverno.io/title: Spread Pods Across Nodes & Zones in CEL expressions + kyverno.io/kubernetes-version: "1.26-1.27" + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/description: >- + Deployments to a Kubernetes cluster with multiple availability zones often need to + distribute those replicas to align with those zones to ensure site-level failures + do not impact availability. This policy ensures topologySpreadConstraints are defined, + to spread pods over nodes and zones. 
Deployments or Statefulsets with less than 3 + replicas are skipped. + policies.kyverno.io/minversion: 1.11.0 + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Deployment, StatefulSet +spec: + background: true + failurePolicy: Ignore + validationFailureAction: Audit + rules: + - name: spread-pods + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "replicas-must-be-3-or-more" + expression: "object.spec.replicas >= 3" + validate: + cel: + expressions: + - expression: >- + has(object.spec.template.spec.topologySpreadConstraints) && + size(object.spec.template.spec.topologySpreadConstraints.filter(t, t.topologyKey == 'kubernetes.io/hostname' || t.topologyKey == 'topology.kubernetes.io/zone')) == 2 + message: "topologySpreadConstraint for kubernetes.io/hostname & topology.kubernetes.io/zone are required" + diff --git a/other/prevent-duplicate-hpa/.chainsaw-test/bad.yaml b/other/prevent-duplicate-hpa/.chainsaw-test/bad.yaml new file mode 100644 index 000000000..c9c7eb1d7 --- /dev/null +++ b/other/prevent-duplicate-hpa/.chainsaw-test/bad.yaml @@ -0,0 +1,12 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: bad-httpd-hpa +spec: + maxReplicas: 3 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: httpd-deployment + targetCPUUtilizationPercentage: 50 diff --git a/other/prevent-duplicate-hpa/.chainsaw-test/chainsaw-test.yaml b/other/prevent-duplicate-hpa/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..77903eb72 --- /dev/null +++ b/other/prevent-duplicate-hpa/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,36 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-duplicate-hpa +spec: + steps: + - name: 01 - Create policy and Enforce + 
try: + - apply: + file: ../prevent-duplicate-hpa.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: prevent-duplicate-hpa + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: 02 - Create some unique HPAs + try: + - apply: + file: prereq.yaml + - name: 03 - Try to create duplicate HPAs + try: + - apply: + file: bad.yaml + expect: + - check: + ($error != null): true + - name: 04 - Create new unique HPAs + try: + - apply: + file: good.yaml diff --git a/other/prevent-duplicate-hpa/.chainsaw-test/good.yaml b/other/prevent-duplicate-hpa/.chainsaw-test/good.yaml new file mode 100644 index 000000000..19c4309c2 --- /dev/null +++ b/other/prevent-duplicate-hpa/.chainsaw-test/good.yaml @@ -0,0 +1,12 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: good-hpa +spec: + maxReplicas: 3 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: redis-deployment + targetCPUUtilizationPercentage: 50 diff --git a/other/prevent-duplicate-hpa/.chainsaw-test/policy-ready.yaml b/other/prevent-duplicate-hpa/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..dc9963c0d --- /dev/null +++ b/other/prevent-duplicate-hpa/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-duplicate-hpa +status: + ready: true diff --git a/other/prevent-duplicate-hpa/.chainsaw-test/prereq.yaml b/other/prevent-duplicate-hpa/.chainsaw-test/prereq.yaml new file mode 100644 index 000000000..c399889fc --- /dev/null +++ b/other/prevent-duplicate-hpa/.chainsaw-test/prereq.yaml @@ -0,0 +1,12 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: httpd-hpa-exist +spec: + maxReplicas: 3 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: httpd-deployment + targetCPUUtilizationPercentage: 50 diff --git 
a/other/prevent-duplicate-hpa/artifacthub-pkg.yml b/other/prevent-duplicate-hpa/artifacthub-pkg.yml new file mode 100644 index 000000000..6431e6018 --- /dev/null +++ b/other/prevent-duplicate-hpa/artifacthub-pkg.yml @@ -0,0 +1,32 @@ +name: prevent-duplicate-hpa +version: 1.0.0 +displayName: Prevent Duplicate HorizontalPodAutoscalers +createdAt: "2024-07-22T12:35:30Z" +description: >- + HorizontalPodAutoscaler (HPA) is useful to automatically adjust the number of pods in a deployment + or replication controller. It requires defining a specific target resource by kind and name. + There are no built-in validation checks by the HPA controller to prevent the creation of multiple HPAs + which target the same resource. This policy has two rules, the first of which ensures that the only targetRef + kinds accepted are one of either Deployment, StatefulSet, ReplicaSet, or DaemonSet. The second + prevents the creation of duplicate HPAs by validating that any new HPA targets a unique resource. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other/prevent-duplicate-hpa/prevent-duplicate-hpa.yaml + ``` +keywords: + - kyverno + - other +readme: | + HorizontalPodAutoscaler (HPA) is useful to automatically adjust the number of pods in a deployment + or replication controller. It requires defining a specific target resource by kind and name. + There are no built-in validation checks by the HPA controller to prevent the creation of multiple HPAs + which target the same resource. This policy has two rules, the first of which ensures that the only targetRef + kinds accepted are one of either Deployment, StatefulSet, ReplicaSet, or DaemonSet. The second + prevents the creation of duplicate HPAs by validating that any new HPA targets a unique resource. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other" + kyverno/kubernetesVersion: "1.27" + kyverno/subject: "HorizontalPodAutoscaler" +digest: 32b4d593d3efa4b433dcc54f76aaaf140f3d654a3905261718ab89155930d880 diff --git a/other/prevent-duplicate-hpa/prevent-duplicate-hpa.yaml b/other/prevent-duplicate-hpa/prevent-duplicate-hpa.yaml new file mode 100644 index 000000000..ac625ce65 --- /dev/null +++ b/other/prevent-duplicate-hpa/prevent-duplicate-hpa.yaml @@ -0,0 +1,70 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-duplicate-hpa + annotations: + policies.kyverno.io/title: Prevent Duplicate HorizontalPodAutoscalers + policies.kyverno.io/category: Other + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.4 + kyverno.io/kubernetes-version: "1.27" + policies.kyverno.io/subject: HorizontalPodAutoscaler + policies.kyverno.io/description: >- + HorizontalPodAutoscaler (HPA) is useful to automatically adjust the number of pods in a deployment + or replication controller. It requires defining a specific target resource by kind and name. + There are no built-in validation checks by the HPA controller to prevent the creation of multiple HPAs + which target the same resource. This policy has two rules, the first of which ensures that the only targetRef + kinds accepted are one of either Deployment, StatefulSet, ReplicaSet, or DaemonSet. The second + prevents the creation of duplicate HPAs by validating that any new HPA targets a unique resource. +spec: + validationFailureAction: Audit + background: false + rules: + - name: verify-kind-name-duplicates + match: + any: + - resources: + kinds: + - HorizontalPodAutoscaler + operations: + - CREATE + validate: + message: >- + The target kind must be specified exactly as Deployment, StatefulSet, ReplicaSet, or DaemonSet. 
+ pattern: + spec: + scaleTargetRef: + kind: Deployment | StatefulSet | ReplicaSet | DaemonSet + - name: check-targetref-duplicates + match: + any: + - resources: + kinds: + - HorizontalPodAutoscaler + operations: + - CREATE + preconditions: + all: + - key: + - Deployment + - StatefulSet + - ReplicaSet + - DaemonSet + operator: AnyIn + value: "{{ request.object.spec.scaleTargetRef.kind }}" + context: + - name: targets + apiCall: + urlPath: "/apis/autoscaling/v1/namespaces/{{ request.namespace }}/horizontalpodautoscalers" + jmesPath: "items[?spec.scaleTargetRef.kind=='{{ request.object.spec.scaleTargetRef.kind }}'].spec.scaleTargetRef.name" + validate: + message: >- + The target {{ request.object.spec.scaleTargetRef.kind }} named + {{ request.object.spec.scaleTargetRef.name }} already has an existing + HPA configured for it. Duplicate HPAs are not allowed. + deny: + conditions: + all: + - key: "{{ request.object.spec.scaleTargetRef.name }}" + operator: AnyIn + value: "{{ targets }}" diff --git a/other/require-container-port-names/.kyverno-test/kyverno-test.yaml b/other/require-container-port-names/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c1054a710 --- /dev/null +++ b/other/require-container-port-names/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,52 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: require-container-port-names +policies: +- ../require-container-port-names.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - badpod01 + - badpod02 + - badpod03 + result: fail +- kind: Pod + policy: require-container-port-names + rule: port-name + resources: + - goodpod01 + - goodpod02 + result: pass +- kind: Deployment + policy: require-container-port-names + rule: port-name + resources: + - baddeployment01 
+ result: fail +- kind: CronJob + policy: require-container-port-names + rule: port-name + resources: + - badcronjob01 + result: fail +- kind: Deployment + policy: require-container-port-names + rule: port-name + resources: + - gooddeployment01 + result: pass +- kind: CronJob + policy: require-container-port-names + rule: port-name + resources: + - goodcronjob01 + result: pass + diff --git a/other/restrict-jobs/.chainsaw-test/cronjob.yaml b/other/restrict-jobs/.chainsaw-test/cronjob.yaml deleted file mode 100644 index 52ae70aa5..000000000 --- a/other/restrict-jobs/.chainsaw-test/cronjob.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: hello -spec: - schedule: "*/1 * * * *" - jobTemplate: - spec: - template: - spec: - containers: - - name: hello - image: busybox - args: - - /bin/sh - - -c - - date; echo Hello from the Kubernetes cluster - restartPolicy: OnFailure \ No newline at end of file diff --git a/other/restrict-jobs/.chainsaw-test/job.yaml b/other/restrict-jobs/.chainsaw-test/job.yaml deleted file mode 100644 index 4edf767e2..000000000 --- a/other/restrict-jobs/.chainsaw-test/job.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: pi -spec: - template: - spec: - containers: - - name: pi - image: perl - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] - restartPolicy: Never - backoffLimit: 4 \ No newline at end of file diff --git a/other/restrict-jobs/.kyverno-test/kyverno-test.yaml b/other/restrict-jobs/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c2e9af736 --- /dev/null +++ b/other/restrict-jobs/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,21 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: restrict-jobs +policies: +- ../restrict-jobs.yaml +resources: +- resource.yaml +results: +- policy: restrict-jobs + rule: restrict-job-from-cronjob + kind: Job + resources: + - badjob + result: fail +- policy: restrict-jobs + rule: 
restrict-job-from-cronjob + kind: Job + resources: + - goodjob + result: skip diff --git a/other/restrict-jobs/.kyverno-test/resource.yaml b/other/restrict-jobs/.kyverno-test/resource.yaml new file mode 100644 index 000000000..6e48e4443 --- /dev/null +++ b/other/restrict-jobs/.kyverno-test/resource.yaml @@ -0,0 +1,32 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: badjob +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: goodjob + ownerReferences: + - apiVersion: batch/v1 + blockOwnerDeletion: true + controller: true + kind: CronJob + name: goodcronjob01 + uid: a554d6b8-8b0a-44da-a9d9-d76a1f85b320 +spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "3600"] + restartPolicy: Never \ No newline at end of file diff --git a/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml b/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml index 29c1bb319..dbaf54c33 100644 --- a/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml +++ b/other/restrict-node-selection/.chainsaw-test/pod-bad.yaml @@ -4,8 +4,7 @@ metadata: name: badpod01 spec: nodeSelector: - matchLabels: - foo: bar + foo: bar containers: - name: busybox image: busybox:1.35 @@ -18,4 +17,4 @@ spec: nodeName: kind-control-plane containers: - name: busybox - image: busybox:1.35 \ No newline at end of file + image: busybox:1.35 diff --git a/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml b/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml index 67ef8da4b..985c6f597 100644 --- a/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml +++ b/other/restrict-node-selection/.chainsaw-test/podcontroller-bad.yaml @@ -16,8 +16,7 @@ spec: app: busybox spec: nodeSelector: - matchLabels: - foo: bar + foo: bar containers: - name: busybox image: busybox:1.35 @@ -36,4 +35,4 @@ 
spec: containers: - name: busybox image: busybox:1.35 - restartPolicy: OnFailure \ No newline at end of file + restartPolicy: OnFailure diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml index e5d9efa7e..d1e1c210e 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/kyverno-test.yaml @@ -14,20 +14,20 @@ results: - kind: StatefulSet policy: topologyspreadconstraints-policy resources: - - monitoring/fail1 - - monitoring/fail2 - - monitoring/fail3 + - monitoring/badss01 + - monitoring/badss02 + - monitoring/badss03 result: fail rule: spread-pods - kind: StatefulSet policy: topologyspreadconstraints-policy resources: - - monitoring/pass + - monitoring/goodss01 result: pass rule: spread-pods - kind: StatefulSet policy: topologyspreadconstraints-policy resources: - - monitoring/skip + - monitoring/skipss01 result: skip rule: spread-pods diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml index 143b295f1..065a47ff5 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail1.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: fail1 + name: badss01 namespace: monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml index f61c1fab8..0031995fc 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail2.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: fail2 + name: badss02 namespace: 
monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml index fc763ab04..d88b17bf3 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-fail3.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: fail3 + name: badss03 namespace: monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml index 4e5f55339..0310e6b00 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-pass.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: pass + name: goodss01 namespace: monitoring labels: app: thanos-memcached diff --git a/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml b/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml index 746608c08..6761e7076 100644 --- a/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml +++ b/other/topologyspreadconstraints-policy/.kyverno-test/resource-skip.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: skip + name: skipss01 namespace: monitoring labels: app: thanos-memcached diff --git a/other/verify-image-with-multi-keys/artifacthub-pkg.yml b/other/verify-image-with-multi-keys/artifacthub-pkg.yml index 886ef0e86..630316e2e 100644 --- a/other/verify-image-with-multi-keys/artifacthub-pkg.yml +++ b/other/verify-image-with-multi-keys/artifacthub-pkg.yml @@ -12,11 +12,11 @@ keywords: - kyverno - Software Supply Chain Security readme: | - There may be multiple keys used to sign images based on the parties involved in the creation process. 
This image verification policy requires the named image be signed by two separate keys. It will search for a global "production" key in a ConfigMap called `key` in the `default` Namespace and also a Namespace key in the same ConfigMap. + There may be multiple keys used to sign images based on the parties involved in the creation process. This image verification policy requires the named image be signed by two separate keys. It will search for a global "production" key in a ConfigMap called `keys` in the `default` Namespace and also a Namespace key in the same ConfigMap. Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ annotations: kyverno/category: "Software Supply Chain Security" kyverno/kubernetesVersion: "1.23" kyverno/subject: "Pod" -digest: 14cc8946fcc7d3141270826f036b28226c88c5d8e93ba475b1523e90512a281b +digest: 512b32c2d9e2bcf48907258ca01ff675efb4ed0d1967351ad161e50b20512d56 diff --git a/other/verify-image-with-multi-keys/verify-image-with-multi-keys.yaml b/other/verify-image-with-multi-keys/verify-image-with-multi-keys.yaml index 2c1592bde..4c77e2ce9 100644 --- a/other/verify-image-with-multi-keys/verify-image-with-multi-keys.yaml +++ b/other/verify-image-with-multi-keys/verify-image-with-multi-keys.yaml @@ -15,7 +15,7 @@ metadata: the parties involved in the creation process. This image verification policy requires the named image be signed by two separate keys. It will search for a global "production" - key in a ConfigMap called `key` in the `default` Namespace + key in a ConfigMap called `keys` in the `default` Namespace and also a Namespace key in the same ConfigMap. 
spec: validationFailureAction: enforce @@ -42,4 +42,4 @@ spec: - keys: publicKeys: "{{ keys.data.production }}" - keys: - publicKeys: "{{ keys.data.{{request.namespace}} }}" \ No newline at end of file + publicKeys: "{{ keys.data.{{request.namespace}} }}" diff --git a/pod-security-cel/baseline/disallow-capabilities/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-capabilities/artifacthub-pkg.yml index 2b3874699..7d4f6bd83 100644 --- a/pod-security-cel/baseline/disallow-capabilities/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/disallow-capabilities/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 6a0ace9b1f5b3f25b34117db4936ba32c4fcbbdfe3d0dba9e61b6152dede3a53 +digest: e5f9cbb8246d36347c0fe62768e6b62b6b323efb7dd1ac60bc8c220e641220fb createdAt: "2023-12-03T00:22:33Z" diff --git a/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.yaml b/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.yaml index fc2a7e7e9..3084fec43 100644 --- a/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.yaml +++ b/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.yaml @@ -21,6 +21,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-host-namespaces/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-host-namespaces/artifacthub-pkg.yml index 8a73c2234..3bc6ebf30 100644 --- a/pod-security-cel/baseline/disallow-host-namespaces/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/disallow-host-namespaces/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 82a3924f4d25ed9bfc8e49395c7b0e8922f5ad0573830747dd3cf96dfb93ad7a +digest: 
c57ee3440401887541c2d97727fc268d5cd9eb47faf00bea2f0ca738caffe483 createdAt: "2023-12-03T00:22:34Z" diff --git a/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.yaml b/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.yaml index 99f7f13d4..d3ac68ac1 100644 --- a/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.yaml +++ b/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-host-path/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-host-path/artifacthub-pkg.yml index b24cf337f..522af9210 100644 --- a/pod-security-cel/baseline/disallow-host-path/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/disallow-host-path/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline)" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod,Volume" -digest: 8f309db940eca3692840c78e2662ff0c25fa718cf0f468b58cdfd4c3d1011274 +digest: 7a78c73a64e61e91876d3ee30c99e1b39774ec885e881f4ffa0be11713710031 createdAt: "2023-12-03T00:22:34Z" diff --git a/pod-security-cel/baseline/disallow-host-path/disallow-host-path.yaml b/pod-security-cel/baseline/disallow-host-path/disallow-host-path.yaml index c9a2796c5..58bb7109c 100644 --- a/pod-security-cel/baseline/disallow-host-path/disallow-host-path.yaml +++ b/pod-security-cel/baseline/disallow-host-path/disallow-host-path.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-host-ports-range/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-host-ports-range/artifacthub-pkg.yml index b4c7ec08a..300feec3e 100644 --- a/pod-security-cel/baseline/disallow-host-ports-range/artifacthub-pkg.yml +++ 
b/pod-security-cel/baseline/disallow-host-ports-range/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 9ea35edfd0be8b253db73ce38ff124d191df9e34f90b01c70125add602a05ff3 +digest: e48d0f138fc501b4cc8726d2bc56dae5f0230b155744ea36eb08dfd5e51d823b createdAt: "2023-12-03T00:22:34Z" diff --git a/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.yaml b/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.yaml index 02910d760..d64e350a7 100644 --- a/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.yaml +++ b/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-host-ports/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-host-ports/artifacthub-pkg.yml index 78f8d9748..619a073ff 100644 --- a/pod-security-cel/baseline/disallow-host-ports/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/disallow-host-ports/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: e5c3baa87ccb5cbbaeb6594e12e4781c8fca0d72a5a513b2a6f8efc80e00b200 +digest: b95cfe16e11be0b9507736687bd99b5ea78c455f8fc35194220326ea5ff3913c createdAt: "2023-12-03T00:22:34Z" diff --git a/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.yaml b/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.yaml index cd4e0fd47..b7603ecfb 100644 --- a/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.yaml +++ b/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + 
- UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-host-process/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-host-process/artifacthub-pkg.yml index 20f4b2c21..50566dd41 100644 --- a/pod-security-cel/baseline/disallow-host-process/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/disallow-host-process/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 33a4b3765e2a54711df4379c41babb8b92f748d784bc79df049fb4fd225633a1 +digest: e95ea8d0c8cc898714ad067f421d7ba822cb29920f56c0937bd0bfdb1e95ab1e createdAt: "2023-12-03T00:22:34Z" diff --git a/pod-security-cel/baseline/disallow-host-process/disallow-host-process.yaml b/pod-security-cel/baseline/disallow-host-process/disallow-host-process.yaml index 03b389530..a79349132 100644 --- a/pod-security-cel/baseline/disallow-host-process/disallow-host-process.yaml +++ b/pod-security-cel/baseline/disallow-host-process/disallow-host-process.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-privileged-containers/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-privileged-containers/artifacthub-pkg.yml index dba8a989f..d51943d7e 100644 --- a/pod-security-cel/baseline/disallow-privileged-containers/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/disallow-privileged-containers/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 92aefb85dcf369f46733d0f04d289deddee34eb5d0b46860b41de9f9eeed2805 +digest: 6ef6ef12ea3680c1d610f056ed163539debdf195bed4a3ab688599d7dfaf82e8 createdAt: "2023-12-03T00:22:34Z" diff --git a/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.yaml 
b/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.yaml index e83abf748..df67481c9 100644 --- a/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.yaml +++ b/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/chainsaw-test.yaml b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..04baf8fe0 --- /dev/null +++ b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,40 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: disallow-proc-mount +spec: + # disable templating because it can cause issues with CEL expressions + template: false + steps: + - name: step-01 + try: + - apply: + file: ../disallow-proc-mount.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: disallow-proc-mount + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/pod-bad.yaml b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..623c582d3 --- /dev/null +++ b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,73 @@ +--- 
+apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- \ No newline at end of file diff --git a/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/pod-good.yaml b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..747d648e2 --- /dev/null +++ b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/pod-good.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: container01 + image: 
dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- \ No newline at end of file diff --git a/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-bad.yaml b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..b719c34b3 --- /dev/null +++ b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,220 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment03 +spec: + replicas: 1 + selector: + 
matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Unmasked + 
containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- \ No newline at end of file diff --git a/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-good.yaml b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..83e0d5aac --- /dev/null +++ b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,245 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + 
app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + 
containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob06 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- \ No newline at end of file diff --git a/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/policy-ready.yaml b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..87ef3bbcb --- /dev/null +++ b/pod-security-cel/baseline/disallow-proc-mount/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-proc-mount +status: + ready: true diff --git a/pod-security-cel/baseline/disallow-proc-mount/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-proc-mount/artifacthub-pkg.yml index a7d794ade..8ee2a461b 100644 --- a/pod-security-cel/baseline/disallow-proc-mount/artifacthub-pkg.yml +++ 
b/pod-security-cel/baseline/disallow-proc-mount/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 32dc701fa3d0c360f9e383d7dd149c2406a5a61d3f1f43c65dc61be6623aa904 +digest: b836600d6ae7f490ba39f55df45fa599c88a5c76386ee6faf8a6609ff626179b createdAt: "2023-12-03T00:22:33Z" diff --git a/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.yaml b/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.yaml index 8c28c4a33..42dbbcf9e 100644 --- a/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.yaml +++ b/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.yaml @@ -25,6 +25,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/disallow-selinux/artifacthub-pkg.yml b/pod-security-cel/baseline/disallow-selinux/artifacthub-pkg.yml index 5f23ce09f..a0d6fa163 100644 --- a/pod-security-cel/baseline/disallow-selinux/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/disallow-selinux/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: fc7d48f00d32dc6b04d5ffc453c2749319154ec90ba6309ce030141c6536eb87 +digest: d842a1741805d9480e9a571a80117f4e2c6210b0d984d1c22e54545c3df9dd0d createdAt: "2023-12-03T00:22:33Z" diff --git a/pod-security-cel/baseline/disallow-selinux/disallow-selinux.yaml b/pod-security-cel/baseline/disallow-selinux/disallow-selinux.yaml index 901d1f86d..983ebe1f1 100644 --- a/pod-security-cel/baseline/disallow-selinux/disallow-selinux.yaml +++ b/pod-security-cel/baseline/disallow-selinux/disallow-selinux.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: @@ -77,6 +80,9 @@ spec: - resources: kinds: - Pod + 
operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/restrict-seccomp/artifacthub-pkg.yml b/pod-security-cel/baseline/restrict-seccomp/artifacthub-pkg.yml index e90895082..359c6c7bd 100644 --- a/pod-security-cel/baseline/restrict-seccomp/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/restrict-seccomp/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: d21b5941cd9dabf326d60c8b6f8ca2fbfbd0ab3358d47e87f6a0d16419cf4213 +digest: ba179d3d3d4435152b80e3aefbae44edd59b2300cd30395cde1c0a015e135f09 createdAt: "2023-12-03T00:22:34Z" diff --git a/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.yaml b/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.yaml index 2cbde252b..e54a3c69b 100644 --- a/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.yaml +++ b/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/baseline/restrict-sysctls/artifacthub-pkg.yml b/pod-security-cel/baseline/restrict-sysctls/artifacthub-pkg.yml index 40023883c..737e81be9 100644 --- a/pod-security-cel/baseline/restrict-sysctls/artifacthub-pkg.yml +++ b/pod-security-cel/baseline/restrict-sysctls/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Baseline) in CEL" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: d2559783b696ce27a5b0684fd65a37cb9cef2e62c6ee39138de27283c5adbeb6 +digest: 97f75f8cdd2e3ee9f9696cdceccc34cf0df5edbca0e3bbab76572494a26ce6e8 createdAt: "2023-12-03T00:22:33Z" diff --git a/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.yaml b/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.yaml index 91cacc773..d564df539 100644 --- 
a/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.yaml +++ b/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.yaml @@ -27,6 +27,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/restricted/disallow-capabilities-strict/artifacthub-pkg.yml b/pod-security-cel/restricted/disallow-capabilities-strict/artifacthub-pkg.yml index d53a4eece..522932ea4 100644 --- a/pod-security-cel/restricted/disallow-capabilities-strict/artifacthub-pkg.yml +++ b/pod-security-cel/restricted/disallow-capabilities-strict/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Restricted)" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 3ba20799de8e2ff846fc1e064fac7b3e0cf318f2d127161bf9e9f90d76aff4da +digest: 45c37cb004764c8fa03d95a018511660b1a6dc5b57752bfa8400384bf5c5037e createdAt: "2023-12-04T09:04:49Z" diff --git a/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.yaml b/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.yaml index cfe5d55fd..843e3ee55 100644 --- a/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.yaml +++ b/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: message: >- Containers must drop `ALL` capabilities. 
@@ -53,6 +56,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/restricted/disallow-privilege-escalation/artifacthub-pkg.yml b/pod-security-cel/restricted/disallow-privilege-escalation/artifacthub-pkg.yml index d1d87148f..2985acbe3 100644 --- a/pod-security-cel/restricted/disallow-privilege-escalation/artifacthub-pkg.yml +++ b/pod-security-cel/restricted/disallow-privilege-escalation/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Restricted)" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: a656fbec861a5420caab9ad15abf28edf45b47c6d749c3d3943223dfb4d37d7a +digest: 6c249b689ee08cc1edcbacf7a00a35cab98d5b1b2bf3fc7ebd8a0dd1e27bb2c1 createdAt: "2023-12-04T09:04:49Z" diff --git a/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.yaml b/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.yaml index cde75c193..5fc1229ae 100644 --- a/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.yaml +++ b/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/restricted/require-run-as-non-root-user/artifacthub-pkg.yml b/pod-security-cel/restricted/require-run-as-non-root-user/artifacthub-pkg.yml index 1e97e8811..dcc49bd50 100644 --- a/pod-security-cel/restricted/require-run-as-non-root-user/artifacthub-pkg.yml +++ b/pod-security-cel/restricted/require-run-as-non-root-user/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Restricted)" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 4325ec1161eb1a2eb361eaed9618b7fe4605bfa621361064a43b4f056f03da8a +digest: 
9351f7b7a1218dfad02538d36423edd15d7b567cc014833e701d0b1e771f1db1 createdAt: "2023-12-04T09:04:49Z" diff --git a/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.yaml b/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.yaml index 96e3e2ca7..0bd042b0f 100644 --- a/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.yaml +++ b/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/restricted/require-run-as-nonroot/artifacthub-pkg.yml b/pod-security-cel/restricted/require-run-as-nonroot/artifacthub-pkg.yml index 07e0bf4e0..2b56451d3 100644 --- a/pod-security-cel/restricted/require-run-as-nonroot/artifacthub-pkg.yml +++ b/pod-security-cel/restricted/require-run-as-nonroot/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Restricted)" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: e5da00b527052b933dd6e95a5e0cc52c857dad54130cbcc8022ccde1b526fb71 +digest: 91161d7046bc3d1900363fa4f44ab06c5be6aad62f6194f6635d5a7585c0dec7 createdAt: "2023-12-04T09:04:49Z" diff --git a/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.yaml b/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.yaml index 68553d5c8..268fd2340 100644 --- a/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.yaml +++ b/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.yaml @@ -23,6 +23,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/restricted/restrict-seccomp-strict/artifacthub-pkg.yml b/pod-security-cel/restricted/restrict-seccomp-strict/artifacthub-pkg.yml index 527f9037a..3bfeea190 100644 
--- a/pod-security-cel/restricted/restrict-seccomp-strict/artifacthub-pkg.yml +++ b/pod-security-cel/restricted/restrict-seccomp-strict/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Restricted)" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod" -digest: 4deffb0a892939288dabf65e9af18732036a464ae3611028a96ae02215140e77 +digest: d31a60d3f693829fa8a17272e9f0e4d7cbbe2773a7e1a282bfc426dbe2e17e9e createdAt: "2023-12-04T09:04:49Z" diff --git a/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.yaml b/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.yaml index ed58c9641..b1c75662f 100644 --- a/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.yaml +++ b/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.yaml @@ -26,6 +26,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml b/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml index a1f934ab1..8c6194e7f 100755 --- a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml +++ b/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml @@ -10,8 +10,6 @@ spec: steps: - name: step-01 try: - - apply: - file: ns.yaml - apply: file: ../restrict-volume-types.yaml - patch: @@ -43,5 +41,8 @@ spec: - name: step-99 try: - script: - content: kubectl delete all --all --force --grace-period=0 -n restrict-voltypes-ns + env: + - name: NAMESPACE + value: $namespace + content: kubectl delete all --all --force --grace-period=0 -n $NAMESPACE diff --git a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/ns.yaml b/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/ns.yaml deleted file mode 100644 index 9cde8be39..000000000 --- 
a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/ns.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: restrict-voltypes-ns \ No newline at end of file diff --git a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml b/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml index a12d37f25..4ea15fd1d 100644 --- a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml +++ b/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml @@ -1,7 +1,6 @@ apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod01 spec: containers: @@ -11,7 +10,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod02 spec: containers: @@ -27,7 +25,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod03 spec: containers: @@ -44,7 +41,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod04 spec: containers: @@ -63,7 +59,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod05 labels: foo: bar @@ -85,7 +80,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod06 spec: containers: @@ -111,7 +105,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod07 spec: containers: @@ -129,7 +122,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod08 spec: containers: @@ -150,7 +142,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod09 spec: containers: diff --git a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml b/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml index 45378d1e6..26c344b15 100644 --- 
a/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml +++ b/pod-security-cel/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml @@ -1,7 +1,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment01 spec: replicas: 1 @@ -20,7 +19,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment02 spec: replicas: 1 @@ -45,7 +43,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment05 spec: replicas: 1 @@ -75,7 +72,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment06 spec: replicas: 1 @@ -110,7 +106,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment07 spec: replicas: 1 @@ -137,7 +132,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment08 spec: replicas: 1 @@ -167,7 +161,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob01 spec: schedule: "*/1 * * * *" @@ -183,7 +176,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob02 spec: schedule: "*/1 * * * *" @@ -205,7 +197,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob03 spec: schedule: "*/1 * * * *" @@ -228,7 +219,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob04 spec: schedule: "*/1 * * * *" @@ -253,7 +243,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob05 spec: schedule: "*/1 * * * *" @@ -282,7 +271,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob06 spec: schedule: "*/1 * * * *" @@ -314,7 
+302,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob07 spec: schedule: "*/1 * * * *" @@ -338,7 +325,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob08 spec: schedule: "*/1 * * * *" @@ -365,7 +351,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob09 spec: schedule: "*/1 * * * *" diff --git a/pod-security-cel/restricted/restrict-volume-types/artifacthub-pkg.yml b/pod-security-cel/restricted/restrict-volume-types/artifacthub-pkg.yml index f231b9264..d31a692c6 100644 --- a/pod-security-cel/restricted/restrict-volume-types/artifacthub-pkg.yml +++ b/pod-security-cel/restricted/restrict-volume-types/artifacthub-pkg.yml @@ -19,5 +19,5 @@ annotations: kyverno/category: "Pod Security Standards (Restricted)" kyverno/kubernetesVersion: "1.26-1.27" kyverno/subject: "Pod,Volume" -digest: d5e29d1e422d57878e74db9bc93f8db1588c6dbb777e13a02d873952a5134d59 +digest: 0b2ded796c6a4ad41059c39be548ec980c64c2adde87119a9290d26ada5628f9 createdAt: "2024-01-02T15:37:55Z" diff --git a/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.yaml b/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.yaml index 7d57ec798..5dec2183d 100644 --- a/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.yaml +++ b/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.yaml @@ -24,6 +24,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/pod-security/baseline/disallow-proc-mount/.chainsaw-test/chainsaw-test.yaml b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..04baf8fe0 --- /dev/null +++ b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,40 @@ +# yaml-language-server: 
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: disallow-proc-mount +spec: + # disable templating because it can cause issues with CEL expressions + template: false + steps: + - name: step-01 + try: + - apply: + file: ../disallow-proc-mount.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: disallow-proc-mount + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml diff --git a/pod-security/baseline/disallow-proc-mount/.chainsaw-test/pod-bad.yaml b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..623c582d3 --- /dev/null +++ b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + 
procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod05 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- \ No newline at end of file diff --git a/pod-security/baseline/disallow-proc-mount/.chainsaw-test/pod-good.yaml b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..747d648e2 --- /dev/null +++ b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/pod-good.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod04 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod05 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod06 +spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename + securityContext: 
+ procMount: Default +--- \ No newline at end of file diff --git a/pod-security/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-bad.yaml b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..b719c34b3 --- /dev/null +++ b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,220 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: baddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + 
initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Unmasked +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Unmasked + containers: + - name: container01 + image: dummyimagename + 
securityContext: + procMount: Unmasked +--- \ No newline at end of file diff --git a/pod-security/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-good.yaml b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..83e0d5aac --- /dev/null +++ b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,245 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment02 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment03 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment04 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gooddeployment05 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: 
apps/v1 +kind: Deployment +metadata: + name: gooddeployment06 +spec: + replicas: 1 + selector: + matchLabels: + app: app + template: + metadata: + labels: + app: app + spec: + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + - name: initcontainer02 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob02 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob03 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: container01 + image: dummyimagename + - name: container02 + image: dummyimagename + securityContext: + procMount: Default +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob04 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob05 +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob06 
+spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + initContainers: + - name: initcontainer01 + image: dummyimagename + - name: initcontainer02 + image: dummyimagename + securityContext: + procMount: Default + containers: + - name: container01 + image: dummyimagename + securityContext: + procMount: Default +--- \ No newline at end of file diff --git a/pod-security/baseline/disallow-proc-mount/.chainsaw-test/policy-ready.yaml b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..87ef3bbcb --- /dev/null +++ b/pod-security/baseline/disallow-proc-mount/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-proc-mount +status: + ready: true diff --git a/pod-security/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml b/pod-security/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml index 7d2604bfd..005d759b7 100755 --- a/pod-security/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml +++ b/pod-security/restricted/restrict-volume-types/.chainsaw-test/chainsaw-test.yaml @@ -8,8 +8,6 @@ spec: steps: - name: step-01 try: - - apply: - file: ns.yaml - apply: file: ../restrict-volume-types.yaml - patch: @@ -41,5 +39,8 @@ spec: - name: step-99 try: - script: - content: kubectl delete all --all --force --grace-period=0 -n restrict-voltypes-ns + env: + - name: NAMESPACE + value: $namespace + content: kubectl delete all --all --force --grace-period=0 -n $NAMESPACE diff --git a/pod-security/restricted/restrict-volume-types/.chainsaw-test/ns.yaml b/pod-security/restricted/restrict-volume-types/.chainsaw-test/ns.yaml deleted file mode 100644 index 9cde8be39..000000000 --- a/pod-security/restricted/restrict-volume-types/.chainsaw-test/ns.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: restrict-voltypes-ns \ No newline 
at end of file diff --git a/pod-security/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml b/pod-security/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml index a12d37f25..4ea15fd1d 100644 --- a/pod-security/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml +++ b/pod-security/restricted/restrict-volume-types/.chainsaw-test/pod-good.yaml @@ -1,7 +1,6 @@ apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod01 spec: containers: @@ -11,7 +10,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod02 spec: containers: @@ -27,7 +25,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod03 spec: containers: @@ -44,7 +41,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod04 spec: containers: @@ -63,7 +59,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod05 labels: foo: bar @@ -85,7 +80,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod06 spec: containers: @@ -111,7 +105,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod07 spec: containers: @@ -129,7 +122,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod08 spec: containers: @@ -150,7 +142,6 @@ spec: apiVersion: v1 kind: Pod metadata: - namespace: restrict-voltypes-ns name: goodpod09 spec: containers: diff --git a/pod-security/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml b/pod-security/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml index 45378d1e6..26c344b15 100644 --- a/pod-security/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml +++ b/pod-security/restricted/restrict-volume-types/.chainsaw-test/podcontroller-good.yaml @@ -1,7 +1,6 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: 
restrict-voltypes-ns name: gooddeployment01 spec: replicas: 1 @@ -20,7 +19,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment02 spec: replicas: 1 @@ -45,7 +43,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment05 spec: replicas: 1 @@ -75,7 +72,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment06 spec: replicas: 1 @@ -110,7 +106,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment07 spec: replicas: 1 @@ -137,7 +132,6 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - namespace: restrict-voltypes-ns name: gooddeployment08 spec: replicas: 1 @@ -167,7 +161,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob01 spec: schedule: "*/1 * * * *" @@ -183,7 +176,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob02 spec: schedule: "*/1 * * * *" @@ -205,7 +197,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob03 spec: schedule: "*/1 * * * *" @@ -228,7 +219,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob04 spec: schedule: "*/1 * * * *" @@ -253,7 +243,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob05 spec: schedule: "*/1 * * * *" @@ -282,7 +271,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob06 spec: schedule: "*/1 * * * *" @@ -314,7 +302,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob07 spec: schedule: "*/1 * * * *" @@ -338,7 +325,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: 
goodcronjob08 spec: schedule: "*/1 * * * *" @@ -365,7 +351,6 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - namespace: restrict-voltypes-ns name: goodcronjob09 spec: schedule: "*/1 * * * *" diff --git a/psa-cel/add-psa-namespace-reporting/.chainsaw-test/chainsaw-test.yaml b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/chainsaw-test.yaml new file mode 100644 index 000000000..ed3b2044c --- /dev/null +++ b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: add-psa-namespace-reporting +spec: + steps: + - name: apply-policy + try: + - apply: + file: ../add-psa-namespace-reporting.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: add-psa-namespace-reporting + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: apply-policy-test + try: + - apply: + file: namespace-with-psa-labels.yaml + - apply: + expect: + - check: + ($error != null): true + file: namespace-without-psa-labels.yaml diff --git a/psa-cel/add-psa-namespace-reporting/.chainsaw-test/namespace-with-psa-labels.yaml b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/namespace-with-psa-labels.yaml new file mode 100644 index 000000000..e94a09e9a --- /dev/null +++ b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/namespace-with-psa-labels.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: test + labels: + pod-security.kubernetes.io/enforce: "privileged" \ No newline at end of file diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-1.yaml b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/namespace-without-psa-labels.yaml old mode 100755 new mode 100644 similarity index 67% rename from 
kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-1.yaml rename to psa-cel/add-psa-namespace-reporting/.chainsaw-test/namespace-without-psa-labels.yaml index caaef7d37..7956df12c --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-1.yaml +++ b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/namespace-without-psa-labels.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: k10-gp-ns01 + name: test-fail \ No newline at end of file diff --git a/psa-cel/add-psa-namespace-reporting/.chainsaw-test/policy-ready.yaml b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..cfb6ab67a --- /dev/null +++ b/psa-cel/add-psa-namespace-reporting/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,9 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-psa-namespace-reporting +status: + ready: true + + + diff --git a/psa-cel/add-psa-namespace-reporting/.kyverno-test/kyverno-test.yaml b/psa-cel/add-psa-namespace-reporting/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..513ac0fe8 --- /dev/null +++ b/psa-cel/add-psa-namespace-reporting/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,22 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: add-psa-namespace-reporting-tests +policies: +- ../add-psa-namespace-reporting.yaml +resources: +- namespace-with-psa-labels.yaml +- namespace-without-psa-labels.yaml +results: + - kind: Namespace + policy: add-psa-namespace-reporting + resources: + - test + rule: check-namespace-labels + result: pass + - kind: Namespace + policy: add-psa-namespace-reporting + resources: + - test-fail + rule: check-namespace-labels + result: fail \ No newline at end of file diff --git a/psa-cel/add-psa-namespace-reporting/.kyverno-test/namespace-with-psa-labels.yaml b/psa-cel/add-psa-namespace-reporting/.kyverno-test/namespace-with-psa-labels.yaml new file mode 100644 index 000000000..e94a09e9a 
--- /dev/null +++ b/psa-cel/add-psa-namespace-reporting/.kyverno-test/namespace-with-psa-labels.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: test + labels: + pod-security.kubernetes.io/enforce: "privileged" \ No newline at end of file diff --git a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-2.yaml b/psa-cel/add-psa-namespace-reporting/.kyverno-test/namespace-without-psa-labels.yaml old mode 100755 new mode 100644 similarity index 67% rename from kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-2.yaml rename to psa-cel/add-psa-namespace-reporting/.kyverno-test/namespace-without-psa-labels.yaml index b6693353e..7956df12c --- a/kasten/k10-generate-gold-backup-policy/.chainsaw-test/chainsaw-step-03-apply-2.yaml +++ b/psa-cel/add-psa-namespace-reporting/.kyverno-test/namespace-without-psa-labels.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: k10-gp-ns02 + name: test-fail \ No newline at end of file diff --git a/psa-cel/add-psa-namespace-reporting/add-psa-namespace-reporting.yaml b/psa-cel/add-psa-namespace-reporting/add-psa-namespace-reporting.yaml new file mode 100644 index 000000000..94f57bfd2 --- /dev/null +++ b/psa-cel/add-psa-namespace-reporting/add-psa-namespace-reporting.yaml @@ -0,0 +1,42 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-psa-namespace-reporting + annotations: + policies.kyverno.io/title: Add PSA Namespace Reporting in CEL expressions + policies.kyverno.io/category: Pod Security Admission, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Namespace + policies.kyverno.io/description: >- + This policy is valuable as it ensures that all namespaces within a Kubernetes + cluster are labeled with Pod Security Admission (PSA) labels, which are crucial + 
for defining security levels and ensuring that pods within a namespace operate + under the defined Pod Security Standard (PSS). By enforcing namespace labeling, + This policy audits namespaces to verify the presence of PSA labels. + If a namespace is found without the required labels, it generates and maintain + and ClusterPolicy Report in default namespace. + This helps administrators identify namespaces that do not comply with the + organization's security practices and take appropriate action to rectify the + situation. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-namespace-labels + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && object.metadata.labels.exists(label, label.startsWith('pod-security.kubernetes.io/') && object.metadata.labels[label] != '')" + message: This Namespace is missing a PSA label. + diff --git a/psa-cel/add-psa-namespace-reporting/artifacthub-pkg.yml b/psa-cel/add-psa-namespace-reporting/artifacthub-pkg.yml new file mode 100644 index 000000000..0f68883ae --- /dev/null +++ b/psa-cel/add-psa-namespace-reporting/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: add-psa-namespace-reporting-cel +version: 1.0.0 +displayName: Add PSA Namespace Reporting in CEL expressions +description: >- + This policy is valuable as it ensures that all namespaces within a Kubernetes cluster are labeled with Pod Security Admission (PSA) labels, which are crucial for defining security levels and ensuring that pods within a namespace operate under the defined Pod Security Standard (PSS). By enforcing namespace labeling, This policy audits namespaces to verify the presence of PSA labels. If a namespace is found without the required labels, it generates and maintain and ClusterPolicy Report in default namespace. 
This helps administrators identify namespaces that do not comply with the organization's security practices and take appropriate action to rectify the situation. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/psa-cel/add-psa-namespace-reporting/add-psa-namespace-reporting.yaml + ``` +keywords: + - kyverno + - Pod Security Admission + - EKS Best Practices + - CEL Expressions +readme: | + This policy is valuable as it ensures that all namespaces within a Kubernetes cluster are labeled with Pod Security Admission (PSA) labels, which are crucial for defining security levels and ensuring that pods within a namespace operate under the defined Pod Security Standard (PSS). By enforcing namespace labeling, this policy audits namespaces to verify the presence of PSA labels. If a namespace is found without the required labels, it generates and maintains a ClusterPolicy Report in the default namespace. This helps administrators identify namespaces that do not comply with the organization's security practices and take appropriate action to rectify the situation. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Pod Security Admission, EKS Best Practices in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace" +digest: d624eddc7d55bcdb3129ccb57f6e7d840b6eda6cf57134ce7385b89a92ea8686 +createdAt: "2024-05-22T08:30:28Z" diff --git a/psa-cel/deny-privileged-profile/.chainsaw-test/chainsaw-test.yaml b/psa-cel/deny-privileged-profile/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..3a62388b2 --- /dev/null +++ b/psa-cel/deny-privileged-profile/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,98 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: deny-privileged-profile +spec: + steps: + - name: step-01 + try: + - apply: + file: ../deny-privileged-profile.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: deny-privileged-profile + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - script: + content: | + #!/bin/bash + set -eu + cp $KUBECONFIG temp + export KUBECONFIG=./temp + export USERNAME=denyprivilegeduser + export CA=ca.crt + #### Get CA certificate from kubeconfig assuming it's the first in the list. + kubectl config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}' | base64 --decode > ./ca.crt + #### Set CLUSTER_SERVER from kubeconfig assuming it's the first in the list. + CLUSTER_SERVER="$(kubectl config view --raw -o jsonpath='{.clusters[0].cluster.server}')" + #### Set CLUSTER from kubeconfig assuming it's the first in the list. 
+ CLUSTER="$(kubectl config view --raw -o jsonpath='{.clusters[0].name}')" + #### Generate private key + openssl genrsa -out $USERNAME.key 2048 + #### Create CSR + openssl req -new -key $USERNAME.key -out $USERNAME.csr -subj "/O=testorg/CN=$USERNAME" + #### Send CSR to kube-apiserver for approval + cat < $USERNAME.crt + #### + #### Create the credential object and output the new kubeconfig file + kubectl config set-credentials $USERNAME --client-certificate=$USERNAME.crt --client-key=$USERNAME.key --embed-certs + #### Set the context + kubectl config set-context $USERNAME-context --user=$USERNAME --cluster=$CLUSTER + # Delete CSR + kubectl delete csr $USERNAME + - apply: + file: cr.yaml + - apply: + file: crb.yaml + - script: + content: | + #!/bin/bash + set -eu + export KUBECONFIG=./temp + kubectl --context=denyprivilegeduser-context create -f ns-good.yaml + - script: + content: | + #!/bin/bash + set -eu + export KUBECONFIG=./temp + if kubectl --context=denyprivilegeduser-context create -f ns-bad.yaml; then exit 1; else exit 0; fi + - sleep: + duration: 5s + finally: + - script: + content: kubectl delete -f ns-good.yaml --ignore-not-found + - script: + content: kubectl delete -f ns-bad.yaml --ignore-not-found + - script: + content: | + set -e + rm ./temp + - name: step-05 + try: + - apply: + file: ns-good.yaml + - apply: + file: ns-bad.yaml diff --git a/psa-cel/deny-privileged-profile/.chainsaw-test/cr.yaml b/psa-cel/deny-privileged-profile/.chainsaw-test/cr.yaml new file mode 100755 index 000000000..f39afe9fc --- /dev/null +++ b/psa-cel/deny-privileged-profile/.chainsaw-test/cr.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ns-deleter +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create diff --git a/psa-cel/deny-privileged-profile/.chainsaw-test/crb.yaml b/psa-cel/deny-privileged-profile/.chainsaw-test/crb.yaml new file mode 100755 index 000000000..1f3cc8101 --- /dev/null +++ 
b/psa-cel/deny-privileged-profile/.chainsaw-test/crb.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ns-deleter:denyprivilegeduser +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ns-deleter +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: denyprivilegeduser diff --git a/psa-cel/deny-privileged-profile/.chainsaw-test/ns-bad.yaml b/psa-cel/deny-privileged-profile/.chainsaw-test/ns-bad.yaml new file mode 100644 index 000000000..21f29bb8b --- /dev/null +++ b/psa-cel/deny-privileged-profile/.chainsaw-test/ns-bad.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + pod-security.kubernetes.io/enforce: privileged + name: deny-privileged-bad-ns01 +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + pod-security.kubernetes.io/enforce: privileged + foo: bar + name: deny-privileged-bad-ns02 \ No newline at end of file diff --git a/psa-cel/deny-privileged-profile/.chainsaw-test/ns-good.yaml b/psa-cel/deny-privileged-profile/.chainsaw-test/ns-good.yaml new file mode 100644 index 000000000..f760fac2c --- /dev/null +++ b/psa-cel/deny-privileged-profile/.chainsaw-test/ns-good.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + pod-security.kubernetes.io/enforce: baseline + name: deny-privileged-good-ns01 +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + foo: bar + name: deny-privileged-good-ns02 +--- +apiVersion: v1 +kind: Namespace +metadata: + name: deny-privileged-good-ns03 \ No newline at end of file diff --git a/psa-cel/deny-privileged-profile/.chainsaw-test/policy-ready.yaml b/psa-cel/deny-privileged-profile/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..93dfa2f2d --- /dev/null +++ b/psa-cel/deny-privileged-profile/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: deny-privileged-profile +status: + ready: true 
diff --git a/psa-cel/deny-privileged-profile/.kyverno-test/kyverno-test.yaml b/psa-cel/deny-privileged-profile/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..6d26b1d82 --- /dev/null +++ b/psa-cel/deny-privileged-profile/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,16 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: deny-privileged-profile +policies: +- ../deny-privileged-profile.yaml +resources: +- ../.chainsaw-test/ns-bad.yaml +results: +- policy: deny-privileged-profile + rule: check-privileged + kind: Namespace + resources: + - deny-privileged-bad-ns01 + - deny-privileged-bad-ns02 + result: fail \ No newline at end of file diff --git a/psa-cel/deny-privileged-profile/artifacthub-pkg.yml b/psa-cel/deny-privileged-profile/artifacthub-pkg.yml new file mode 100644 index 000000000..0a78df882 --- /dev/null +++ b/psa-cel/deny-privileged-profile/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: deny-privileged-profile-cel +version: 1.0.0 +displayName: Deny Privileged Profile in CEL expressions +description: >- + When Pod Security Admission (PSA) is enforced at the cluster level via an AdmissionConfiguration file which defines a default level at baseline or restricted, setting of a label at the `privileged` profile will effectively cause unrestricted workloads in that Namespace, overriding the cluster default. This may effectively represent a circumvention attempt and should be closely controlled. This policy ensures that only those holding the cluster-admin ClusterRole may create Namespaces which assign the label `pod-security.kubernetes.io/enforce=privileged`. 
+install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/psa-cel/deny-privileged-profile/deny-privileged-profile.yaml + ``` +keywords: + - kyverno + - Pod Security Admission + - CEL Expressions +readme: | + When Pod Security Admission (PSA) is enforced at the cluster level via an AdmissionConfiguration file which defines a default level at baseline or restricted, setting of a label at the `privileged` profile will effectively cause unrestricted workloads in that Namespace, overriding the cluster default. This may effectively represent a circumvention attempt and should be closely controlled. This policy ensures that only those holding the cluster-admin ClusterRole may create Namespaces which assign the label `pod-security.kubernetes.io/enforce=privileged`. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Pod Security Admission in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Namespace" +digest: a8eb6c291f91e8ebd2535712413432e0659f2839c0929334e5f69a883506d85a +createdAt: "2024-05-22T08:35:47Z" diff --git a/psa-cel/deny-privileged-profile/deny-privileged-profile.yaml b/psa-cel/deny-privileged-profile/deny-privileged-profile.yaml new file mode 100644 index 000000000..f01dddfee --- /dev/null +++ b/psa-cel/deny-privileged-profile/deny-privileged-profile.yaml @@ -0,0 +1,44 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: deny-privileged-profile + annotations: + policies.kyverno.io/title: Deny Privileged Profile in CEL expressions + policies.kyverno.io/category: Pod Security Admission in CEL expressions + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Namespace + policies.kyverno.io/description: >- + When Pod Security Admission (PSA) is 
enforced at the cluster level + via an AdmissionConfiguration file which defines a default level at + baseline or restricted, setting of a label at the `privileged` profile + will effectively cause unrestricted workloads in that Namespace, overriding + the cluster default. This may effectively represent a circumvention attempt + and should be closely controlled. This policy ensures that only those holding + the cluster-admin ClusterRole may create Namespaces which assign the label + `pod-security.kubernetes.io/enforce=privileged`. +spec: + validationFailureAction: Audit + background: false + rules: + - name: check-privileged + match: + any: + - resources: + kinds: + - Namespace + selector: + matchLabels: + pod-security.kubernetes.io/enforce: privileged + exclude: + any: + - clusterRoles: + - cluster-admin + validate: + cel: + expressions: + - expression: "false" + message: Only cluster-admins may create Namespaces that allow setting the privileged level. + diff --git a/psa/deny-privileged-profile/.kyverno-test/kyverno-test.yaml b/psa/deny-privileged-profile/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..6d26b1d82 --- /dev/null +++ b/psa/deny-privileged-profile/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,16 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: deny-privileged-profile +policies: +- ../deny-privileged-profile.yaml +resources: +- ../.chainsaw-test/ns-bad.yaml +results: +- policy: deny-privileged-profile + rule: check-privileged + kind: Namespace + resources: + - deny-privileged-bad-ns01 + - deny-privileged-bad-ns02 + result: fail \ No newline at end of file diff --git a/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/chainsaw-test.yaml b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..82d8a6f91 --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,87 @@ +# yaml-language-server: 
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: disallow-default-tlsoptions +spec: + steps: + - name: step-00 + try: + - assert: + file: crd-assert.yaml + - name: step-01 + try: + - apply: + file: ../disallow-default-tlsoptions.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: disallow-default-tlsoptions + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - script: + content: | + #!/bin/bash + set -eu + export USERNAME=tlsoptionsuser + export CA=ca.crt + #### Get CA certificate from kubeconfig assuming it's the first in the list. + kubectl config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}' | base64 --decode > ./ca.crt + #### Set CLUSTER_SERVER from kubeconfig assuming it's the first in the list. + CLUSTER_SERVER="$(kubectl config view --raw -o jsonpath='{.clusters[0].cluster.server}')" + #### Set CLUSTER from kubeconfig assuming it's the first in the list. 
+ CLUSTER="$(kubectl config view --raw -o jsonpath='{.clusters[0].name}')" + #### Generate private key + openssl genrsa -out $USERNAME.key 2048 + #### Create CSR + openssl req -new -key $USERNAME.key -out $USERNAME.csr -subj "/O=testorg/CN=$USERNAME" + #### Send CSR to kube-apiserver for approval + cat <<EOF | kubectl apply -f - + apiVersion: certificates.k8s.io/v1 + kind: CertificateSigningRequest + metadata: + name: $USERNAME + spec: + request: $(cat $USERNAME.csr | base64 | tr -d '\n') + signerName: kubernetes.io/kube-apiserver-client + expirationSeconds: 86400 + usages: + - client auth + EOF + #### Approve the CSR + kubectl certificate approve $USERNAME + #### Download the signed certificate + kubectl get csr $USERNAME -o jsonpath='{.status.certificate}' | base64 --decode > $USERNAME.crt + #### + #### Create the credential object and output the new kubeconfig file + kubectl config set-credentials $USERNAME --client-certificate=$USERNAME.crt --client-key=$USERNAME.key --embed-certs + #### Set the context + kubectl config set-context $USERNAME-context --user=$USERNAME --cluster=$CLUSTER + # Delete CSR + kubectl delete csr $USERNAME + - name: step-03 + try: + - apply: + file: cr.yaml + - apply: + file: crb.yaml + - name: step-04 + try: + - script: + content: if kubectl create --context=tlsoptionsuser-context -f tlsoption.yaml; + then exit 1; else exit 0; fi + - script: + content: kubectl create -f tlsoption.yaml + - name: step-99 + try: + - script: + content: | + kubectl delete -f tlsoption.yaml + kubectl config unset users.tlsoptionsuser + kubectl config unset contexts.tlsoptionsuser-context diff --git a/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/cr.yaml b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/cr.yaml new file mode 100755 index 000000000..28edd3a9a --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/cr.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tlsoptions-creator +rules: +- apiGroups: + - traefik.containo.us + resources: + - tlsoptions + verbs: + - create diff --git a/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/crb.yaml b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/crb.yaml new file mode 100755 index 000000000..ffcdb7691 --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/crb.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name:
tlsoptions-creator:tlsoptionsuser +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tlsoptions-creator +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: tlsoptionsuser diff --git a/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/crd-assert.yaml b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/crd-assert.yaml new file mode 100755 index 000000000..086d560e0 --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/crd-assert.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: tlsoptions.traefik.containo.us +spec: {} +status: + acceptedNames: + kind: TLSOption + plural: tlsoptions + singular: tlsoption + storedVersions: + - v1alpha1 diff --git a/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/policy-ready.yaml b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..f3e37c449 --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,6 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-default-tlsoptions +status: + ready: true diff --git a/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/tlsoption.yaml b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/tlsoption.yaml new file mode 100644 index 000000000..c38aa643f --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.chainsaw-test/tlsoption.yaml @@ -0,0 +1,21 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: TLSOption +metadata: + name: default +spec: + minVersion: VersionTLS12 + maxVersion: VersionTLS13 + curvePreferences: + - CurveP521 + - CurveP384 + cipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_RSA_WITH_AES_256_GCM_SHA384 + clientAuth: + secretNames: + - secret-ca1 + - secret-ca2 + clientAuthType: VerifyClientCertIfGiven + sniStrict: true + alpnProtocols: + - foobar \ No newline at end of file diff --git 
a/traefik-cel/disallow-default-tlsoptions/.kyverno-test/kyverno-test.yaml b/traefik-cel/disallow-default-tlsoptions/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..445c7aaa5 --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,15 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: disallow-default-tlsoptions +policies: +- ../disallow-default-tlsoptions.yaml +resources: +- resource.yaml +results: +- kind: TLSOption + policy: disallow-default-tlsoptions + resources: + - default + result: fail + rule: disallow-default-tlsoptions diff --git a/traefik-cel/disallow-default-tlsoptions/.kyverno-test/resource.yaml b/traefik-cel/disallow-default-tlsoptions/.kyverno-test/resource.yaml new file mode 100644 index 000000000..b7de0f06a --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/.kyverno-test/resource.yaml @@ -0,0 +1,22 @@ +apiVersion: traefik.containo.us/v1alpha1 +kind: TLSOption +metadata: + name: default + namespace: default +spec: + minVersion: VersionTLS12 + maxVersion: VersionTLS13 + curvePreferences: + - CurveP521 + - CurveP384 + cipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + - TLS_RSA_WITH_AES_256_GCM_SHA384 + clientAuth: + secretNames: + - secret-ca1 + - secret-ca2 + clientAuthType: VerifyClientCertIfGiven + sniStrict: true + alpnProtocols: + - foobar \ No newline at end of file diff --git a/traefik-cel/disallow-default-tlsoptions/artifacthub-pkg.yml b/traefik-cel/disallow-default-tlsoptions/artifacthub-pkg.yml new file mode 100644 index 000000000..bc588570b --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/artifacthub-pkg.yml @@ -0,0 +1,23 @@ +name: disallow-default-tlsoptions-cel +version: 1.0.0 +displayName: Disallow Default TLSOptions in CEL expressions +description: >- + The TLSOption CustomResource sets cluster-wide TLS configuration options for Traefik when none are specified in a TLS router. 
Since this can take effect for all Ingress resources, creating the `default` TLSOption is a restricted operation. This policy ensures that only a cluster-admin can create the `default` TLSOption resource. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/traefik-cel/disallow-default-tlsoptions/disallow-default-tlsoptions.yaml + ``` +keywords: + - kyverno + - Traefik + - CEL Expressions +readme: | + The TLSOption CustomResource sets cluster-wide TLS configuration options for Traefik when none are specified in a TLS router. Since this can take effect for all Ingress resources, creating the `default` TLSOption is a restricted operation. This policy ensures that only a cluster-admin can create the `default` TLSOption resource. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Traefik in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "TLSOption" +digest: ddb6b4d4f7a09720499c6ad306b4ee73999003d0fde7d2feb35cb6b19d0c73df +createdAt: "2024-05-22T07:43:46Z" diff --git a/traefik-cel/disallow-default-tlsoptions/disallow-default-tlsoptions.yaml b/traefik-cel/disallow-default-tlsoptions/disallow-default-tlsoptions.yaml new file mode 100644 index 000000000..d09b5ad55 --- /dev/null +++ b/traefik-cel/disallow-default-tlsoptions/disallow-default-tlsoptions.yaml @@ -0,0 +1,37 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: disallow-default-tlsoptions + annotations: + policies.kyverno.io/title: Disallow Default TLSOptions in CEL expressions + policies.kyverno.io/category: Traefik in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: TLSOption + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + The TLSOption CustomResource sets cluster-wide TLS configuration options for Traefik when + none are 
specified in a TLS router. Since this can take effect for all Ingress resources, + creating the `default` TLSOption is a restricted operation. This policy ensures that + only a cluster-admin can create the `default` TLSOption resource. +spec: + validationFailureAction: Audit + background: false + rules: + - name: disallow-default-tlsoptions + match: + any: + - resources: + names: + - default + kinds: + - TLSOption + exclude: + clusterRoles: + - cluster-admin + validate: + cel: + expressions: + - expression: "false" + message: "Only cluster administrators are allowed to set default TLSOptions." +