From e20d8997bbb88c87924a758c2fdc69ccf9fc9aad Mon Sep 17 00:00:00 2001 From: Christophe Jauffret Date: Sat, 6 Jan 2024 08:58:49 +0100 Subject: [PATCH] add Nutanix support --- README.md | 2 +- config/control-plane-components.yaml | 2 + config/rbac/role.yaml | 2 + ...jicontrolplane_controller_cluster_patch.go | 6 +- docs/providers-nutanix.md | 190 ++++++++++++++++++ 5 files changed, 199 insertions(+), 3 deletions(-) create mode 100644 docs/providers-nutanix.md diff --git a/README.md b/README.md index b0b861f..ae4eb4d 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ You can refer to the [official documentation website](https://kamaji.clastix.io/ | [Equinix/Packet](https://github.com/kubernetes-sigs/cluster-api-provider-packet) ([technical considerations](docs/providers-packet.md)) | += v0.7.2 | | [KubeVirt](https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt) ([technical considerations](docs/providers-kubevirt.md)) | += 0.1.7 | | [Metal³](https://github.com/metal3-io/cluster-api-provider-metal3) ([technical considerations](docs/providers-metal3.md)) | += 1.4.0 | -| Nutanix | _In road-map_ | +| [Nutanix](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix) ([technical considerations](docs/providers-nutanix.md)) | += 1.2.4 | | [OpenStack](https://github.com/kubernetes-sigs/cluster-api-provider-openstack) | += 0.8.0 | | Tinkerbell | _In road-map_ | | [vSphere](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere) ([technical considerations](docs/providers-vsphere.md)) | += 1.7.0 | diff --git a/config/control-plane-components.yaml b/config/control-plane-components.yaml index 6802cdb..6aeabcd 100644 --- a/config/control-plane-components.yaml +++ b/config/control-plane-components.yaml @@ -4038,6 +4038,7 @@ rules: - infrastructure.cluster.x-k8s.io resources: - kubevirtclusters + - nutanixclusters - packetclusters verbs: - patch @@ -4045,6 +4046,7 @@ rules: - infrastructure.cluster.x-k8s.io resources: - kubevirtclusters/status 
+ - nutanixclusters/status - packetclusters/status verbs: - patch diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 229b55a..32b5136 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -54,6 +54,7 @@ rules: - infrastructure.cluster.x-k8s.io resources: - kubevirtclusters + - nutanixclusters - packetclusters verbs: - patch @@ -61,6 +62,7 @@ rules: - infrastructure.cluster.x-k8s.io resources: - kubevirtclusters/status + - nutanixclusters/status - packetclusters/status verbs: - patch diff --git a/controllers/kamajicontrolplane_controller_cluster_patch.go b/controllers/kamajicontrolplane_controller_cluster_patch.go index 08b0ed6..e042e53 100644 --- a/controllers/kamajicontrolplane_controller_cluster_patch.go +++ b/controllers/kamajicontrolplane_controller_cluster_patch.go @@ -37,6 +37,8 @@ func (r *KamajiControlPlaneReconciler) patchCluster(ctx context.Context, cluster return r.patchGenericCluster(ctx, cluster, endpoint, port, true) case "Metal3Cluster": return r.checkGenericCluster(ctx, cluster, endpoint, port) + case "NutanixCluster": + return r.patchGenericCluster(ctx, cluster, endpoint, port, true) case "OpenStackCluster": return r.patchOpenStackCluster(ctx, cluster, endpoint, port) case "PacketCluster": @@ -63,8 +65,8 @@ func (r *KamajiControlPlaneReconciler) checkOrPatchVSphereCluster(ctx context.Co return nil } -//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=kubevirtclusters;packetclusters,verbs=patch -//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=kubevirtclusters/status;packetclusters/status,verbs=patch +//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=kubevirtclusters;nutanixclusters;packetclusters,verbs=patch +//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=kubevirtclusters/status;nutanixclusters/status;packetclusters/status,verbs=patch func (r *KamajiControlPlaneReconciler) patchGenericCluster(ctx context.Context, cluster capiv1beta1.Cluster, 
endpoint string, port int64, patchStatus bool) error { infraCluster := unstructured.Unstructured{} diff --git a/docs/providers-nutanix.md b/docs/providers-nutanix.md new file mode 100644 index 0000000..12f3596 --- /dev/null +++ b/docs/providers-nutanix.md @@ -0,0 +1,190 @@ +# Kamaji and Nutanix + +The Kamaji Control Plane provider is able to create a _Nutanix_ backed Kubernetes cluster by providing Kamaji Control Planes. + +``` +NAME READY SEVERITY REASON SINCE MESSAGE +Cluster/capi-quickstart True 5m42s +├─ClusterInfrastructure - NutanixCluster/capi-quickstart +├─ControlPlane - KamajiControlPlane/kamaji-nutanix-127 +└─Workers + └─MachineDeployment/capi-quickstart-md-0 True 68s + └─3 Machines... True 5m13s See capi-quickstart-md-0-nfz4l-7hkx7, capi-quickstart-md-0-nfz4l-8wj6v, ... +``` + +## Example manifests + +This example needs a Service Load Balancer (MetalLB, Kube-VIP, ...) and [CAAPH](https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm) installed in your management cluster.
+ +```yaml +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: capi-quickstart + name: capi-quickstart +spec: + clusterNetwork: + apiServerPort: 6443 + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KamajiControlPlane + name: kamaji-nutanix-127 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: capi-quickstart +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: capi-quickstart +spec: + controlPlaneEndpoint: + host: 0.0.0.0 # will be automatically patched by the Kamaji controller + port: 0 # will be automatically patched by the Kamaji controller +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 +kind: KamajiControlPlane +metadata: + name: kamaji-nutanix-127 + namespace: default +spec: + apiServer: + extraArgs: + - --cloud-provider=external + controllerManager: + extraArgs: + - --cloud-provider=external + dataStoreName: default + addons: + coreDNS: { } + kubeProxy: { } + kubelet: + cgroupfs: systemd + preferredAddressTypes: + - ExternalIP + - InternalIP + - Hostname + network: + # serviceAddress: 10.83.1.2 # can be statically assigned + serviceType: LoadBalancer + deployment: + replicas: 2 + version: 1.27.8 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: capi-quickstart-worker +spec: + template: + spec: + bootType: legacy + cluster: + name: cloud-dev + type: name + image: + name: ubuntu-2204-kube-v1.27.8 + type: name + memorySize: 4Gi + providerID: nutanix://mycluster-m1 + subnet: + - name: capi + type: name + systemDiskSize: 40Gi + vcpuSockets: 2 + vcpusPerSocket: 1 +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: capi-quickstart-md-0 +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + eviction-hard:
nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + cloud-provider: external + postKubeadmCommands: + - echo "after kubeadm call" > /var/log/postkubeadm.log + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + users: + - lockPassword: false + name: capiuser + sshAuthorizedKeys: + - ssh-ed25519 XXXXXXXXXX # Replace with your SSH public key if you want direct access to worker nodes + sudo: ALL=(ALL) NOPASSWD:ALL + verbosity: 10 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: capi-quickstart + name: capi-quickstart-md-0 +spec: + clusterName: capi-quickstart + replicas: 3 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: capi-quickstart + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: capi-quickstart-md-0 + clusterName: capi-quickstart + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: capi-quickstart-worker + version: v1.27.8 +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cilium +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: capi-quickstart + releaseName: cilium + repoURL: https://helm.cilium.io/ + chartName: cilium + namespace: kube-system +``` + +## Technical considerations + +The Nutanix Kubernetes cluster requires a VIP for the Control Plane component.
+To maintain the same experience you have to know in advance the Kamaji Tenant Control Plane address and port. + +In regard to the address, the following values must be the same: + +- `KamajiControlPlane.spec.network.address` +- `NutanixCluster.spec.controlPlaneEndpoint.host` + +The same applies to the Kubernetes API Server binding port: + +- `Cluster.spec.clusterNetwork.apiServerPort` +- `NutanixCluster.spec.controlPlaneEndpoint.port` + +If you install a Service Load Balancer solution (MetalLB, Kube-VIP, ...) in your management cluster you can skip this kind of check. +The VIP will be automatically assigned and the Kamaji Control Plane provider will take care of patching the `NutanixCluster` resource with the endpoint provided by Kamaji itself. + +## Kubernetes Nutanix Cloud Provider customisation + +WIP