diff --git a/controllers/kuadrant_controller.go b/controllers/kuadrant_controller.go
index b43adcdc0..17a0e5162 100644
--- a/controllers/kuadrant_controller.go
+++ b/controllers/kuadrant_controller.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/json"
 
+	"github.com/kuadrant/kuadrant-operator/pkg/kuadranttools"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/utils/env"
@@ -469,27 +470,54 @@ func (r *KuadrantReconciler) removeAnnotationFromGateways(ctx context.Context, k
 }
 
 func (r *KuadrantReconciler) reconcileLimitador(ctx context.Context, kObj *kuadrantv1beta1.Kuadrant) error {
-	limitador := &limitadorv1alpha1.Limitador{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Limitador",
-			APIVersion: "limitador.kuadrant.io/v1alpha1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      common.LimitadorName,
-			Namespace: kObj.Namespace,
-		},
-		Spec: limitadorv1alpha1.LimitadorSpec{
-			RateLimitHeaders: &[]limitadorv1alpha1.RateLimitHeadersType{limitadorv1alpha1.RateLimitHeadersTypeDraft03}[0],
-			Telemetry:        &[]limitadorv1alpha1.Telemetry{limitadorv1alpha1.TelemetryExhaustive}[0],
-		},
+	limitadorKey := client.ObjectKey{Name: common.LimitadorName, Namespace: kObj.Namespace}
+	limitador := &limitadorv1alpha1.Limitador{}
+	err := r.Client().Get(ctx, limitadorKey, limitador)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			limitador = &limitadorv1alpha1.Limitador{
+				TypeMeta: metav1.TypeMeta{
+					Kind:       "Limitador",
+					APIVersion: "limitador.kuadrant.io/v1alpha1",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      common.LimitadorName,
+					Namespace: kObj.Namespace,
+				},
+				Spec: limitadorv1alpha1.LimitadorSpec{
+					RateLimitHeaders: &[]limitadorv1alpha1.RateLimitHeadersType{limitadorv1alpha1.RateLimitHeadersTypeDraft03}[0],
+					Telemetry:        &[]limitadorv1alpha1.Telemetry{limitadorv1alpha1.TelemetryExhaustive}[0],
+				},
+			}
+		} else {
+			return err
+		}
+	}
+
+	if kObj.Spec.Limitador != nil {
+		if kObj.Spec.Limitador.Affinity != nil {
+			limitador.Spec.Affinity = kObj.Spec.Limitador.Affinity
+		}
+		if kObj.Spec.Limitador.PodDisruptionBudget != nil {
+			limitador.Spec.PodDisruptionBudget = kObj.Spec.Limitador.PodDisruptionBudget
+		}
+		if kObj.Spec.Limitador.Replicas != nil {
+			limitador.Spec.Replicas = kObj.Spec.Limitador.Replicas
+		}
+		if kObj.Spec.Limitador.ResourceRequirements != nil {
+			limitador.Spec.ResourceRequirements = kObj.Spec.Limitador.ResourceRequirements
+		}
+		if kObj.Spec.Limitador.Storage != nil {
+			limitador.Spec.Storage = kObj.Spec.Limitador.Storage
+		}
 	}
 
-	err := r.SetOwnerReference(kObj, limitador)
+	err = r.SetOwnerReference(kObj, limitador)
 	if err != nil {
 		return err
 	}
 
-	return r.ReconcileResource(ctx, &limitadorv1alpha1.Limitador{}, limitador, reconcilers.CreateOnlyMutator)
+	return r.ReconcileResource(ctx, &limitadorv1alpha1.Limitador{}, limitador, kuadranttools.LimitadorMutator)
 }
 
 func (r *KuadrantReconciler) reconcileAuthorino(ctx context.Context, kObj *kuadrantv1beta1.Kuadrant) error {
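The behavioural change in this hunk: the controller now fetches any existing Limitador CR first, only constructs the default object on `IsNotFound`, overlays fields set under the Kuadrant CR's `spec.limitador`, and reconciles with `kuadranttools.LimitadorMutator` instead of the create-only mutator, so later edits to `spec.limitador` propagate. For context, here is a minimal sketch of the mutator contract implied by the signatures in this patch; the exact `MutateFn` definition in the operator's `reconcilers` package is an assumption inferred from its usage here:

```go
package reconcilers

import "sigs.k8s.io/controller-runtime/pkg/client"

// MutateFn is the contract ReconcileResource applies on updates: it receives
// the object read from the cluster and the desired object, copies the managed
// fields onto the existing one, and returns true when a write is needed.
// (Sketch inferred from this patch, not the authoritative definition.)
type MutateFn func(existing, desired client.Object) (bool, error)

// CreateOnlyMutator matches the previous behaviour: never report a needed
// update, so the Limitador CR was written once at creation and user edits to
// spec.limitador were ignored afterwards.
func CreateOnlyMutator(existing, desired client.Object) (bool, error) {
	return false, nil
}
```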
diff --git a/doc/reference/kuadrant.md b/doc/reference/kuadrant.md
new file mode 100644
index 000000000..4bf74d8ce
--- /dev/null
+++ b/doc/reference/kuadrant.md
@@ -0,0 +1,94 @@

# The Kuadrant Custom Resource Definition (CRD)

## kuadrant

<details>
  <summary>Note on Limitador</summary>
  The Kuadrant operator creates a Limitador CR named `limitador` in the same namespace as the Kuadrant CR. If there is a pre-existing Limitador CR of the same name, the Kuadrant operator will take ownership of that Limitador CR.
</details>
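To make the tables below concrete, here is a sketch of building a Kuadrant CR in Go that overrides a couple of Limitador deployment settings. The name, namespace, and values are illustrative; any field left nil keeps the operator's defaults:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	kuadrantv1beta1 "github.com/kuadrant/kuadrant-operator/api/v1beta1"
)

// A Kuadrant CR tuning the Limitador deployment: two replicas and an
// explicit CPU request. Name and namespace are illustrative.
var kuadrantCR = &kuadrantv1beta1.Kuadrant{
	ObjectMeta: metav1.ObjectMeta{Name: "kuadrant", Namespace: "kuadrant-system"},
	Spec: kuadrantv1beta1.KuadrantSpec{
		Limitador: &kuadrantv1beta1.LimitadorSpec{
			Replicas: ptr.To(2),
			ResourceRequirements: &corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceCPU: resource.MustParse("500m"),
				},
			},
		},
	},
}
```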
| **Field** | **Type** | **Required** | **Description** |
|-----------|-----------------------------------|:------------:|------------------------------------------------------|
| `spec` | [KuadrantSpec](#kuadrantspec) | No | The specification for the Kuadrant custom resource. |
| `status` | [KuadrantStatus](#kuadrantstatus) | No | The status of the custom resource. |

## KuadrantSpec

| **Field** | **Type** | **Required** | **Description** |
|-------------|-------------------------|:------------:|----------------------------------|
| `limitador` | [Limitador](#limitador) | No | Configure limitador deployments. |

### Limitador

| **Field** | **Type** | **Required** | **Description** |
|------------------------|-------------------------------------------------------------------------------------|:------------:|-------------------------------------------------------|
| `affinity` | [Affinity](https://pkg.go.dev/k8s.io/api/core/v1#Affinity) | No | Describes the scheduling rules for limitador pods. |
| `replicas` | Number | No | Sets the number of limitador replicas to deploy. |
| `resourceRequirements` | [ResourceRequirements](https://pkg.go.dev/k8s.io/api/core/v1#ResourceRequirements) | No | Sets the resource requirements for limitador pods. |
| `pdb` | [PodDisruptionBudgetType](#poddisruptionbudgettype) | No | Configures the allowed pod disruption budget fields. |
| `storage` | [Storage](#storage) | No | Defines the backend storage option for limitador. |

### PodDisruptionBudgetType

| **Field** | **Type** | **Required** | **Description** |
|------------------|----------|:------------:|-----------------|
| `maxUnavailable` | Number | No | An eviction is allowed if at most "maxUnavailable" limitador pods are unavailable after the eviction, i.e. even in the absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable". |
| `minAvailable` | Number | No | An eviction is allowed if at least "minAvailable" limitador pods will still be available after the eviction, i.e. even in the absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying "100%". |

### Storage

| **Field** | **Type** | **Required** | **Description** |
|----------------|-----------------------------|:------------:|-----------------|
| `redis` | [Redis](#redis) | No | Uses Redis to store limitador counters. |
| `redis-cached` | [RedisCached](#rediscached) | No | Uses Redis to store limitador counters, with an in-memory cache. |
| `disk` | [Disk](#disk) | No | Counters are held on disk (persistent). Kubernetes [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) will be used to store counters. |
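The storage variants map directly onto the limitador-operator's Go types, as exercised by the unit tests later in this patch. A minimal sketch selecting the plain Redis backend (the secret name is illustrative):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"

	limitadorv1alpha1 "github.com/kuadrant/limitador-operator/api/v1alpha1"
)

// Redis-backed counter storage: the referenced secret (name is illustrative)
// is expected to hold the Redis connection URL, per configSecretRef below.
var storage = &limitadorv1alpha1.Storage{
	Redis: &limitadorv1alpha1.Redis{
		ConfigSecretRef: &corev1.LocalObjectReference{Name: "redisconfig"},
	},
}
```

Set under `spec.limitador.storage` on the Kuadrant CR, this value is copied verbatim onto the Limitador CR by `reconcileLimitador`.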
#### Redis

| **Field** | **Type** | **Required** | **Description** |
|-------------------|-------------------------------------------------------------------------------------|:------------:|------------------------------------------------------------------|
| `configSecretRef` | [LocalObjectReference](https://pkg.go.dev/k8s.io/api/core/v1#LocalObjectReference) | No | ConfigSecretRef refers to the secret holding the URL for Redis. |

#### RedisCached

| **Field** | **Type** | **Required** | **Description** |
|-------------------|-------------------------------------------------------------------------------------|:------------:|------------------------------------------------------------------|
| `configSecretRef` | [LocalObjectReference](https://pkg.go.dev/k8s.io/api/core/v1#LocalObjectReference) | No | ConfigSecretRef refers to the secret holding the URL for Redis. |
| `options` | [Options](#options) | No | Configures a number of caching options for limitador. |

##### Options

| **Field** | **Type** | **Required** | **Description** |
|----------------|----------|:------------:|--------------------------------------------------------------------------|
| `ttl` | Number | No | TTL for cached counters, in milliseconds [default: 5000]. |
| `ratio` | Number | No | Ratio to apply to the TTL from Redis on cached counters [default: 10]. |
| `flush-period` | Number | No | Flush period for counters, in milliseconds [default: 1000]. |
| `max-cached` | Number | No | Maximum number of counters to cache [default: 10000]. |

#### Disk

| **Field** | **Type** | **Required** | **Description** |
|-------------------------|-----------------------------------|:------------:|------------------------------------------------------------------------------------------------------|
| `persistentVolumeClaim` | [PVCGenericSpec](#pvcgenericspec) | No | Configures the persistent volume claim used by limitador. |
| `optimize` | String | No | Defines the optimization option of the disk persistence type. Valid options: "throughput", "disk". |

##### PVCGenericSpec

| **Field** | **Type** | **Required** | **Description** |
|--------------------|-------------------------------------------------------------------|:------------:|----------------------------------------------------------------------------------|
| `storageClassName` | String | No | Storage class name. |
| `resources` | [PersistentVolumeClaimResources](#persistentvolumeclaimresources) | No | Resources represent the minimum resources the volume should have. |
| `volumeName` | String | No | VolumeName is the binding reference to the PersistentVolume backing the claim. |

###### PersistentVolumeClaimResources

| **Field** | **Type** | **Required** | **Description** |
|------------|---------------------------------------------------------------------------------------|:------------:|-----------------------------------------------------------------------|
| `requests` | [Quantity](https://pkg.go.dev/k8s.io/apimachinery@v0.28.4/pkg/api/resource#Quantity) | Yes | Storage resource requests to be used on the persistentVolumeClaim. |
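Putting several of the knobs above together, the sketch below mirrors the "Full spec passed" unit test later in this patch, building a `spec.limitador` value with replicas, a pod disruption budget, and resource requests (values are illustrative):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"

	"github.com/kuadrant/kuadrant-operator/api/v1beta1"
	limitadorv1alpha1 "github.com/kuadrant/limitador-operator/api/v1alpha1"
)

// spec.limitador with three replicas, at least one pod always available, and
// an explicit CPU request; unset fields keep the limitador-operator defaults.
var limitadorSpec = &v1beta1.LimitadorSpec{
	Replicas: ptr.To(3),
	PodDisruptionBudget: &limitadorv1alpha1.PodDisruptionBudgetType{
		MinAvailable: &intstr.IntOrString{IntVal: 1},
	},
	ResourceRequirements: &corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceCPU: resource.MustParse("500m"),
		},
	},
}
```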
## KuadrantStatus

| **Field** | **Type** | **Description** |
|----------------------|-----------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------|
| `observedGeneration` | Number | Number of the last observed generation of the resource. Use it to check if the status info is up to date with the latest resource spec. |
| `conditions` | [][Condition](https://pkg.go.dev/k8s.io/apimachinery@v0.28.4/pkg/apis/meta/v1#Condition) | List of conditions that define the status of the resource. |

diff --git a/pkg/kuadranttools/limitador_tools.go b/pkg/kuadranttools/limitador_tools.go
new file mode 100644
index 000000000..4710cc2ec
--- /dev/null
+++ b/pkg/kuadranttools/limitador_tools.go
@@ -0,0 +1,53 @@
package kuadranttools

import (
	"fmt"
	"reflect"

	"github.com/kuadrant/kuadrant-operator/api/v1beta1"
	limitadorv1alpha1 "github.com/kuadrant/limitador-operator/api/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// LimitadorMutator reconciles an existing Limitador CR towards the desired
// one. It only manages the owner references and the spec subset exposed
// through the Kuadrant CR, and reports whether an update must be written.
func LimitadorMutator(existingObj, desiredObj client.Object) (bool, error) {
	update := false
	existing, ok := existingObj.(*limitadorv1alpha1.Limitador)
	if !ok {
		return false, fmt.Errorf("existingObj %T is not a *limitadorv1alpha1.Limitador", existingObj)
	}
	desired, ok := desiredObj.(*limitadorv1alpha1.Limitador)
	if !ok {
		return false, fmt.Errorf("desiredObj %T is not a *limitadorv1alpha1.Limitador", desiredObj)
	}

	if !reflect.DeepEqual(existing.OwnerReferences, desired.OwnerReferences) {
		update = true
		existing.OwnerReferences = desired.OwnerReferences
	}

	// Compare only the fields the Kuadrant CR manages, so that changes made
	// directly on the Limitador CR to any other fields are left alone.
	existingSpec := limitadorSpecSubSet(existing.Spec)
	desiredSpec := limitadorSpecSubSet(desired.Spec)

	if !reflect.DeepEqual(existingSpec, desiredSpec) {
		update = true
		existing.Spec.Affinity = desired.Spec.Affinity
		existing.Spec.PodDisruptionBudget = desired.Spec.PodDisruptionBudget
		existing.Spec.Replicas = desired.Spec.Replicas
		existing.Spec.ResourceRequirements = desired.Spec.ResourceRequirements
		existing.Spec.Storage = desired.Spec.Storage
	}

	return update, nil
}

// limitadorSpecSubSet extracts the subset of the Limitador spec that the
// Kuadrant CR manages, for field-wise comparison in LimitadorMutator.
func limitadorSpecSubSet(spec limitadorv1alpha1.LimitadorSpec) v1beta1.LimitadorSpec {
	out := v1beta1.LimitadorSpec{}

	out.Affinity = spec.Affinity
	out.PodDisruptionBudget = spec.PodDisruptionBudget
	out.Replicas = spec.Replicas
	out.ResourceRequirements = spec.ResourceRequirements
	out.Storage = spec.Storage

	return out
}
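A minimal sketch of the mutator's contract in use, mirroring how `reconcileLimitador` wires it into `ReconcileResource`: only the managed subset triggers an update, and the desired values are copied onto the existing object.

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	"github.com/kuadrant/kuadrant-operator/pkg/kuadranttools"
	limitadorv1alpha1 "github.com/kuadrant/limitador-operator/api/v1alpha1"
)

func main() {
	existing := &limitadorv1alpha1.Limitador{
		Spec: limitadorv1alpha1.LimitadorSpec{Replicas: ptr.To(1)},
	}
	desired := &limitadorv1alpha1.Limitador{
		Spec: limitadorv1alpha1.LimitadorSpec{Replicas: ptr.To(3)},
	}

	// Replicas is part of the managed subset, so the mutator copies it onto
	// the existing object and reports that an update is required.
	update, err := kuadranttools.LimitadorMutator(existing, desired)
	fmt.Println(update, err, *existing.Spec.Replicas) // true <nil> 3
}
```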
diff --git a/pkg/kuadranttools/limitador_tools_test.go b/pkg/kuadranttools/limitador_tools_test.go
new file mode 100644
index 000000000..bd531f49a
--- /dev/null
+++ b/pkg/kuadranttools/limitador_tools_test.go
@@ -0,0 +1,198 @@
//go:build unit

package kuadranttools

import (
	"reflect"
	"strings"
	"testing"

	"k8s.io/utils/ptr"

	"github.com/kuadrant/kuadrant-operator/api/v1beta1"
	limitadorv1alpha1 "github.com/kuadrant/limitador-operator/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestLimitadorMutator(t *testing.T) {
	type args struct {
		existingObj client.Object
		desiredObj  client.Object
	}
	tests := []struct {
		name          string
		args          args
		want          bool
		wantErr       bool
		errorContains string
	}{
		{
			name:          "existingObj is not a limitador type",
			wantErr:       true,
			errorContains: "existingObj",
		},
		{
			name: "desiredObj is not a limitador type",
			args: args{
				existingObj: &limitadorv1alpha1.Limitador{},
			},
			wantErr:       true,
			errorContains: "desiredObj",
		},
		{
			name: "No update required",
			args: args{
				existingObj: &limitadorv1alpha1.Limitador{},
				desiredObj:  &limitadorv1alpha1.Limitador{},
			},
			want: false,
		},
		{
			name: "Update required",
			args: args{
				existingObj: &limitadorv1alpha1.Limitador{
					Spec: limitadorv1alpha1.LimitadorSpec{
						Replicas: ptr.To(3),
					},
				},
				desiredObj: &limitadorv1alpha1.Limitador{
					Spec: limitadorv1alpha1.LimitadorSpec{
						Replicas: ptr.To(1),
					},
				},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := LimitadorMutator(tt.args.existingObj, tt.args.desiredObj)
			if (err != nil) != tt.wantErr {
				t.Errorf("LimitadorMutator() error = %v, wantErr %v", err, tt.wantErr)
				return
			}

			if err != nil && tt.wantErr {
				if !strings.Contains(err.Error(), tt.errorContains) {
					t.Errorf("LimitadorMutator() error = %v, should contain %v", err, tt.errorContains)
				}
			}
			if got != tt.want {
				t.Errorf("LimitadorMutator() got = %v, want %v", got, tt.want)
			}
		})
	}
}

func Test_limitadorSpecSubSet(t *testing.T) {
	type args struct {
		spec limitadorv1alpha1.LimitadorSpec
	}
	tests := []struct {
		name string
		args args
		want v1beta1.LimitadorSpec
	}{
		{
			name: "Empty spec passed",
			args: args{spec: limitadorv1alpha1.LimitadorSpec{}},
			want: v1beta1.LimitadorSpec{},
		},
		{
			name: "Full spec passed",
			args: args{spec: limitadorv1alpha1.LimitadorSpec{
				Affinity: &corev1.Affinity{
					PodAffinity: &corev1.PodAffinity{
						PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
							{
								Weight: 100,
								PodAffinityTerm: corev1.PodAffinityTerm{LabelSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										"app": "limitador",
									},
								}},
							},
						},
					},
				},
				Replicas: ptr.To(3),
				Storage: &limitadorv1alpha1.Storage{
					Redis: &limitadorv1alpha1.Redis{
						ConfigSecretRef: &corev1.LocalObjectReference{
							Name: "secret_config",
						},
					},
				},
				PodDisruptionBudget: &limitadorv1alpha1.PodDisruptionBudgetType{
					MinAvailable: &intstr.IntOrString{
						IntVal: 1,
					},
				},
				ResourceRequirements: &corev1.ResourceRequirements{
					Limits:   corev1.ResourceList{},
					Requests: corev1.ResourceList{},
				},
			}},
			want: v1beta1.LimitadorSpec{
				Affinity: &corev1.Affinity{
					PodAffinity: &corev1.PodAffinity{
						PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
							{
								Weight: 100,
								PodAffinityTerm: corev1.PodAffinityTerm{LabelSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										"app": "limitador",
									},
								}},
							},
						},
					},
				},
				Replicas: ptr.To(3),
				Storage: &limitadorv1alpha1.Storage{
					Redis: &limitadorv1alpha1.Redis{
						ConfigSecretRef: &corev1.LocalObjectReference{
							Name: "secret_config",
						},
					},
				},
				PodDisruptionBudget: &limitadorv1alpha1.PodDisruptionBudgetType{
					MinAvailable: &intstr.IntOrString{
						IntVal: 1,
					},
				},
				ResourceRequirements: &corev1.ResourceRequirements{
					Limits:   corev1.ResourceList{},
					Requests: corev1.ResourceList{},
				},
			},
		},
		{
			name: "Partial spec passed",
			args: args{spec: limitadorv1alpha1.LimitadorSpec{
				Replicas: ptr.To(3),
				ResourceRequirements: &corev1.ResourceRequirements{
					Limits:   corev1.ResourceList{},
					Requests: corev1.ResourceList{},
				},
			}},
			want: v1beta1.LimitadorSpec{
				Replicas: ptr.To(3),
				ResourceRequirements: &corev1.ResourceRequirements{
					Limits:   corev1.ResourceList{},
					Requests: corev1.ResourceList{},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := limitadorSpecSubSet(tt.args.spec); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("limitadorSpecSubSet() = %v, want %v", got, tt.want)
			}
		})
	}
}
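These tests are guarded by the `unit` build tag (`//go:build unit`), so a direct run needs the tag passed explicitly, e.g. `go test -tags unit ./pkg/kuadranttools/...`, or whatever unit-test target the repository's Makefile provides.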