From d209a7be96475d1bbf0c3d3f72b05138b0b9cc94 Mon Sep 17 00:00:00 2001 From: Jordi Gil Date: Fri, 6 Dec 2024 11:22:22 -0500 Subject: [PATCH] Add manifests for version 0.0.2 (#5599) Signed-off-by: Jordi Gil --- ...er-manager-metrics-service_v1_service.yaml | 23 ++ ...c.authorization.k8s.io_v1_clusterrole.yaml | 17 + ...covery-operator.clusterserviceversion.yaml | 310 ++++++++++++++++++ .../odf.openshift.io_noderecoveries.yaml | 170 ++++++++++ .../0.0.2/metadata/annotations.yaml | 15 + .../0.0.2/tests/scorecard/config.yaml | 70 ++++ operators/odf-node-recovery-operator/Makefile | 120 +++++++ 7 files changed, 725 insertions(+) create mode 100644 operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-controller-manager-metrics-service_v1_service.yaml create mode 100644 operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml create mode 100644 operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator.clusterserviceversion.yaml create mode 100644 operators/odf-node-recovery-operator/0.0.2/manifests/odf.openshift.io_noderecoveries.yaml create mode 100644 operators/odf-node-recovery-operator/0.0.2/metadata/annotations.yaml create mode 100644 operators/odf-node-recovery-operator/0.0.2/tests/scorecard/config.yaml create mode 100644 operators/odf-node-recovery-operator/Makefile diff --git a/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-controller-manager-metrics-service_v1_service.yaml b/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 00000000000..41e14c8f402 --- /dev/null +++ b/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + 
app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: odf-node-recovery-operator + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: service + app.kubernetes.io/part-of: odf-node-recovery-operator + control-plane: controller-manager + name: odf-node-recovery-operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager +status: + loadBalancer: {} diff --git a/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 00000000000..29b8ab5f598 --- /dev/null +++ b/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: odf-node-recovery-operator + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrole + app.kubernetes.io/part-of: odf-node-recovery-operator + name: odf-node-recovery-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator.clusterserviceversion.yaml b/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator.clusterserviceversion.yaml new file mode 100644 index 00000000000..42bb8a697da --- /dev/null +++ 
b/operators/odf-node-recovery-operator/0.0.2/manifests/odf-node-recovery-operator.clusterserviceversion.yaml @@ -0,0 +1,310 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "odf.openshift.io/v1alpha1", + "kind": "NodeRecovery", + "metadata": { + "labels": { + "app.kubernetes.io/created-by": "odf-node-recovery-operator", + "app.kubernetes.io/instance": "noderecovery-sample", + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "noderecovery", + "app.kubernetes.io/part-of": "odf-node-recovery-operator" + }, + "name": "noderecovery-sample" + }, + "spec": null + } + ] + capabilities: Basic Install + categories: Storage,OpenShift Optional + containerImage: quay.io/jordigilh/odf-node-recovery-controller@sha256:8f49b0c574c1dac03debb183e33145c6d915c4f9c792b09b30316a5ba4f3b5e9 + createdAt: "2024-11-21T11:07:28Z" + description: ODF Node Recovery is an operator that assist in the recovery of an + ODF cluster that has a device replaced + operators.operatorframework.io/builder: operator-sdk-v1.37.0 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 + repository: https://github.com/jordigilh/odf-node-recovery-operator + name: odf-node-recovery-operator.v0.0.2 +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - kind: NodeRecovery + name: noderecoveries.odf.openshift.io + version: v1alpha1 + description: NodeRecovery is the Schema for the noderecoveries API. 
+ description: ODF Node Recovery is an operator that assist in the recovery of an + ODF cluster that has a device replaced, following the steps defined in https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.12/html-single/replacing_devices + displayName: ODF Node Recovery Operator + icon: + - base64data: PHN2ZyBpZD0iTGF5ZXJfMSIgZGF0YS1uYW1lPSJMYXllciAxIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAxOTIgMTQ1Ij48ZGVmcz48c3R5bGU+LmNscy0xe2ZpbGw6I2UwMDt9PC9zdHlsZT48L2RlZnM+PHRpdGxlPlJlZEhhdC1Mb2dvLUhhdC1Db2xvcjwvdGl0bGU+PHBhdGggZD0iTTE1Ny43Nyw2Mi42MWExNCwxNCwwLDAsMSwuMzEsMy40MmMwLDE0Ljg4LTE4LjEsMTcuNDYtMzAuNjEsMTcuNDZDNzguODMsODMuNDksNDIuNTMsNTMuMjYsNDIuNTMsNDRhNi40Myw2LjQzLDAsMCwxLC4yMi0xLjk0bC0zLjY2LDkuMDZhMTguNDUsMTguNDUsMCwwLDAtMS41MSw3LjMzYzAsMTguMTEsNDEsNDUuNDgsODcuNzQsNDUuNDgsMjAuNjksMCwzNi40My03Ljc2LDM2LjQzLTIxLjc3LDAtMS4wOCwwLTEuOTQtMS43My0xMC4xM1oiLz48cGF0aCBjbGFzcz0iY2xzLTEiIGQ9Ik0xMjcuNDcsODMuNDljMTIuNTEsMCwzMC42MS0yLjU4LDMwLjYxLTE3LjQ2YTE0LDE0LDAsMCwwLS4zMS0zLjQybC03LjQ1LTMyLjM2Yy0xLjcyLTcuMTItMy4yMy0xMC4zNS0xNS43My0xNi42QzEyNC44OSw4LjY5LDEwMy43Ni41LDk3LjUxLjUsOTEuNjkuNSw5MCw4LDgzLjA2LDhjLTYuNjgsMC0xMS42NC01LjYtMTcuODktNS42LTYsMC05LjkxLDQuMDktMTIuOTMsMTIuNSwwLDAtOC40MSwyMy43Mi05LjQ5LDI3LjE2QTYuNDMsNi40MywwLDAsMCw0Mi41Myw0NGMwLDkuMjIsMzYuMywzOS40NSw4NC45NCwzOS40NU0xNjAsNzIuMDdjMS43Myw4LjE5LDEuNzMsOS4wNSwxLjczLDEwLjEzLDAsMTQtMTUuNzQsMjEuNzctMzYuNDMsMjEuNzdDNzguNTQsMTA0LDM3LjU4LDc2LjYsMzcuNTgsNTguNDlhMTguNDUsMTguNDUsMCwwLDEsMS41MS03LjMzQzIyLjI3LDUyLC41LDU1LC41LDc0LjIyYzAsMzEuNDgsNzQuNTksNzAuMjgsMTMzLjY1LDcwLjI4LDQ1LjI4LDAsNTYuNy0yMC40OCw1Ni43LTM2LjY1LDAtMTIuNzItMTEtMjcuMTYtMzAuODMtMzUuNzgiLz48L3N2Zz4= + mediatype: image/svg+xml + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - update + - apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumes + 
verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods + verbs: + - delete + - deletecollection + - get + - list + - watch + - apiGroups: + - "" + resources: + - pods/exec + verbs: + - create + - apiGroups: + - apps + resources: + - deployments + verbs: + - get + - list + - update + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - delete + - get + - apiGroups: + - config.openshift.io + resources: + - clusterversions + verbs: + - get + - list + - watch + - apiGroups: + - ocs.openshift.io + resources: + - ocsinitializations + - storageclusters + verbs: + - get + - list + - update + - watch + - apiGroups: + - odf.openshift.io + resources: + - noderecoveries + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - odf.openshift.io + resources: + - noderecoveries/finalizers + verbs: + - update + - apiGroups: + - odf.openshift.io + resources: + - noderecoveries/status + verbs: + - get + - patch + - update + - apiGroups: + - template.openshift.io + resources: + - templates + verbs: + - get + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + serviceAccountName: odf-node-recovery-operator-controller-manager + deployments: + - label: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: odf-node-recovery-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: odf-node-recovery-operator + control-plane: controller-manager + name: odf-node-recovery-operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + strategy: {} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + containers: + - 
args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + image: registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:29201e85bd41642b72c7c0ce915e40aad90823d0efc3e7bbab9c351c92c74341 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + command: + - /manager + image: quay.io/jordigilh/odf-node-recovery-controller@sha256:8f49b0c574c1dac03debb183e33145c6d915c4f9c792b09b30316a5ba4f3b5e9 + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: odf-node-recovery-operator-controller-manager + terminationGracePeriodSeconds: 10 + permissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: odf-node-recovery-operator-controller-manager + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - odf + - node 
recovery + links: + - name: GitHub Repository + url: https://github.com/jordigilh/odf-node-recovery-operator + maintainers: + - email: support@redhat.com + name: Red Hat + maturity: alpha + minKubeVersion: 1.25.0 + provider: + name: Red Hat, Inc. + url: www.redhat.com + version: 0.0.2 diff --git a/operators/odf-node-recovery-operator/0.0.2/manifests/odf.openshift.io_noderecoveries.yaml b/operators/odf-node-recovery-operator/0.0.2/manifests/odf.openshift.io_noderecoveries.yaml new file mode 100644 index 00000000000..3d3c09d4258 --- /dev/null +++ b/operators/odf-node-recovery-operator/0.0.2/manifests/odf.openshift.io_noderecoveries.yaml @@ -0,0 +1,170 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + creationTimestamp: null + name: noderecoveries.odf.openshift.io +spec: + group: odf.openshift.io + names: + kind: NodeRecovery + listKind: NodeRecoveryList + plural: noderecoveries + shortNames: + - noderec + singular: noderecovery + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.startTime + name: Created At + type: string + - jsonPath: .status.completionTime + name: Completed At + type: string + - description: Status + jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.conditions[?(@.status=="True")].type + name: State + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: NodeRecovery is the Schema for the noderecoveries API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + description: NodeRecoveryStatus defines the observed state of NodeRecovery + properties: + completionTime: + description: |- + Represents time when the job was completed. It is not guaranteed to + be set in happens-before order across separate operations. + It is represented in RFC3339 form and is in UTC. + The completion time is set when the reconciliation finishes successfully, and only then. + The value cannot be updated or removed. The value indicates the same or + later point in time as the startTime field. + format: date-time + type: string + conditions: + description: Current conditions state of CR. + items: + description: PodCondition contains details for the current condition + of this pod. + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: Status is the status of the condition. Can be True, + False, Unknown + enum: + - "True" + - "False" + type: string + type: + description: Type is the type of the condition. + type: string + required: + - lastProbeTime + - lastTransitionTime + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + crashLoopBackOffPods: + description: |- + CrashLoopBackOffPods captures whether there were OSD pods in CrashLoopBackOff when reconciling the CR. 
This value is used by the reconciler along with the `PendingPods` to determine + if the reconciliation requires a restart the ODF operator. + type: boolean + forcedOSDRemoval: + description: |- + ForcedOSDRemoval indicates if the reconciliation of the CR required to trigger the OSD Removal job with the ForcedOSDRemoval flag as true or false. If true it means the initial attempt to run the job timed out after 10 minutes + and the reconciliation loop triggered a second job with the flag set to true to ensure success. + type: boolean + nodeDevice: + description: NodeDevice contains a list of node name and device name + pair used by the reconciliation to track which nodes and devices + have failed based on the OSD pods that are in CrashLoopbackOff + items: + properties: + nodeName: + type: string + pvName: + type: string + required: + - nodeName + - pvName + type: object + type: array + osdIDs: + description: |- + CrashedOSDDeploymentIDs contains a list of the OSD IDs that match the ceph osd pods that are in CrashLoopbackOff status. This value is used during the reconciliation loop to cleanup the + pods that are not being removed when the deployment is scaled down to 0 + items: + type: string + type: array + pendingPods: + description: |- + PendingPods captures whether there were OSD pods in pending phase when reconciling the CR. This value is used by the reconciler along with `CrashLoopBackOffPods` to determine + if the reconciliation requires a restart of the ODF operator. 
+ type: boolean + phase: + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file + enum: + - Running + - Completed + - Failed + type: string + startTime: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/operators/odf-node-recovery-operator/0.0.2/metadata/annotations.yaml b/operators/odf-node-recovery-operator/0.0.2/metadata/annotations.yaml new file mode 100644 index 00000000000..39ae1f991f1 --- /dev/null +++ b/operators/odf-node-recovery-operator/0.0.2/metadata/annotations.yaml @@ -0,0 +1,15 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: odf-node-recovery-operator + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.metrics.builder: operator-sdk-v1.37.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 + com.redhat.openshift.versions: "v4.12-v4.14" + + # Annotations for testing. 
+ operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/operators/odf-node-recovery-operator/0.0.2/tests/scorecard/config.yaml b/operators/odf-node-recovery-operator/0.0.2/tests/scorecard/config.yaml new file mode 100644 index 00000000000..4e32de13b2e --- /dev/null +++ b/operators/odf-node-recovery-operator/0.0.2/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.35.0 + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {} diff --git a/operators/odf-node-recovery-operator/Makefile 
b/operators/odf-node-recovery-operator/Makefile
new file mode 100644
index 00000000000..59b3c6a5d8d
--- /dev/null
+++ b/operators/odf-node-recovery-operator/Makefile
@@ -0,0 +1,120 @@
+# This Makefile provides a set of targets to generate and validate operator catalogs
+# using the Operator Package Manager (opm) tool.
+
+# The makefile should be placed in the root of the operator repository.
+# for example at: /operators/<operator-name>/Makefile
+
+# A user can customize "catalog" target to generate the operator catalog in a way
+# that suits the operator.
+# OPM allows for the generation of catalogs using different templates.
+# - basic: generates a basic catalog
+# - semver: generates a catalog with semver versioning
+
+PWD=$(shell pwd)
+OPERATOR_NAME=$(shell basename $(PWD))
+TOPDIR=$(abspath $(dir $(PWD))/../)
+BINDIR=${TOPDIR}/bin
+
+# Add the bin directory to the PATH
+export PATH := $(BINDIR):$(PATH)
+# A place to store the generated catalogs
+CATALOG_DIR=${TOPDIR}/catalogs
+
+# A place to store the operator catalog templates
+OPERATOR_CATALOG_TEMPLATE_DIR = ${PWD}/catalog-templates
+
+# The operator pipeline image to use for the fbc-onboarding target
+OPERATOR_PIPELINE_IMAGE ?= quay.io/redhat-isv/operator-pipelines-images:released
+
+# Define the paths for both auth files
+DOCKER_CONFIG := $(HOME)/.docker/config.json
+CONTAINERS_AUTH := $(XDG_RUNTIME_DIR)/containers/auth.json
+
+# A list of OCP versions to generate catalogs for
+# This list can be customized to include the versions that are relevant to the operator
+# DO NOT change this line (except for the versions) if you want to take advantage
+# of the automated catalog promotion
+OCP_VERSIONS=$(shell echo "v4.12 v4.13 v4.14" )
+
+
+.PHONY: fbc-onboarding
+fbc-onboarding: clean
+	@if [ -f $(DOCKER_CONFIG) ]; then \
+		echo "Using Docker config file: $(DOCKER_CONFIG)"; \
+		CONFIG_VOLUME="-v $(DOCKER_CONFIG):/root/.docker/config.json"; \
+	elif [ -f $(CONTAINERS_AUTH) ]; then \
+		echo "Using containers auth file: $(CONTAINERS_AUTH)"; \
+		CONFIG_VOLUME="-v $(CONTAINERS_AUTH):/root/.docker/config.json"; \
+	else \
+		echo "No authentication file found."; \
+	fi; \
+	podman run \
+		--rm \
+		--user $$(id -u):$$(id -g) \
+		--security-opt label=disable \
+		--pull always \
+		-v $(TOPDIR):/workspace \
+		$$CONFIG_VOLUME \
+		$(OPERATOR_PIPELINE_IMAGE) fbc-onboarding \
+		--repo-root /workspace \
+		--operator-name $(OPERATOR_NAME) \
+		--cache-dir /workspace/.catalog_cache
+
+.PHONY: catalogs
+# replace this stub with one customized to serve your needs ... some examples below
+
+# here are a few examples of different approaches to fulfilling this target
+# comment out / customize the one that makes the most sense, or use them as examples in defining your own
+#
+# --- BASIC TEMPLATE ---
+catalogs: basic
+#
+# --- SEMVER TEMPLATE ---
+#catalogs: semver
+
+
+# basic target provides an example FBC generation from a `basic` template type.
+# this example takes a single file as input and generates a well-formed FBC operator contribution as an output
+.PHONY: basic
+basic: ${BINDIR}/opm clean
+	for version in $(OCP_VERSIONS); do \
+		mkdir -p ${CATALOG_DIR}/$${version}/${OPERATOR_NAME}/ && \
+		${BINDIR}/opm alpha render-template basic -o yaml ${OPERATOR_CATALOG_TEMPLATE_DIR}/$${version}.yaml > ${CATALOG_DIR}/$${version}/${OPERATOR_NAME}/catalog.yaml; \
+	done
+
+
+# semver target provides an example FBC generation from a `semver` template type.
+# this example takes a single file as input and generates a well-formed FBC operator contribution as an output
+.PHONY: semver
+semver: ${BINDIR}/opm clean
+	for version in $(OCP_VERSIONS); do \
+		mkdir -p ${CATALOG_DIR}/$${version}/${OPERATOR_NAME}/ && \
+		${BINDIR}/opm alpha render-template semver -o yaml ${OPERATOR_CATALOG_TEMPLATE_DIR}/$${version}.yaml > ${CATALOG_DIR}/$${version}/${OPERATOR_NAME}/catalog.yaml; \
+	done
+
+
+# validate-catalogs target illustrates FBC validation
+# all FBC must pass opm validation in order to be able to be used in a catalog
+.PHONY: validate-catalogs
+validate-catalogs: ${BINDIR}/opm
+	for version in $(OCP_VERSIONS); do \
+		${BINDIR}/opm validate $(CATALOG_DIR)/$${version}/${OPERATOR_NAME} && echo "$${version} catalog validation passed" || { echo "$${version} catalog validation failed"; exit 1; }; \
+	done
+
+.PHONY: create-catalog-dir
+create-catalog-dir:
+	mkdir -p $(CATALOG_DIR)
+
+.PHONY: clean
+clean: create-catalog-dir
+	find $(CATALOG_DIR) -type d -name ${OPERATOR_NAME} -exec rm -rf {} +
+
+
+OS=$(shell uname -s | tr '[:upper:]' '[:lower:]')
+ARCH=$(shell uname -m | sed 's/x86_64/amd64/')
+
+# Automatically download the opm binary
+OPM_VERSION ?= v1.46.0
+${BINDIR}/opm:
+	if [ ! -d ${BINDIR} ]; then mkdir -p ${BINDIR}; fi
+	curl -fsSLO https://github.com/operator-framework/operator-registry/releases/download/$(OPM_VERSION)/$(OS)-$(ARCH)-opm && chmod +x $(OS)-$(ARCH)-opm && mv $(OS)-$(ARCH)-opm ${BINDIR}/opm